Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
abess
abess-master/docs/Tutorial/4-computation-tips/plot_large_sample.py
""" Large-Sample Data ================= """ # %% # Introduction # ^^^^^^^^^^^^^^^^^^^^^^^ # # .. image:: ../../Tutorial/figure/large-sample.png # # A large sample size leads to a large range of possible support sizes which adds to the computational burdon. # The computational tip here is to use the golden-section searching to avoid support size enumeration. # %% # A motivated observation # ^^^^^^^^^^^^^^^^^^^^^^^ # Here we generate a simple example under linear model via ``make_glm_data``. from time import time import numpy as np import matplotlib.pyplot as plt from abess.datasets import make_glm_data from abess.linear import LinearRegression np.random.seed(0) data = make_glm_data(n=100, p=20, k=5, family='gaussian') ic = np.zeros(21) for sz in range(21): model = LinearRegression(support_size=[sz], ic_type='ebic') model.fit(data.x, data.y) ic[sz] = model.eval_loss_ print("lowest point: ", np.argmin(ic)) # %% # The generated data contains 100 observations with 20 predictors, # while 5 of them are useful (has non-zero coefficients). # Uses extended Bayesian information criterion (EBIC), the ``abess`` successfully detect the true support size. # # We go further and take a look on the support size versus EBIC returned # by ``LinearRegression`` in ``abess.linear``. # %% plt.plot(ic, 'o-') plt.xlabel('support size') plt.ylabel('EBIC') plt.title('Model Selection via EBIC') plt.show() # %% # From the figure, we can find that # the curve should is a strictly unimodal function achieving minimum at the true subset size, # where ``support_size = 5`` is the lowest point. # # Motivated by this observation, we consider a golden-section search technique to determine the optimal support size # associated with the minimum EBIC. # # .. image:: ../../Tutorial/figure/goldenSection.png # # Compare to the sequential searching, the golden section is much faster because it skip some support sizes which are likely to be a non-optimal one. 
# Precisely, searching the optimal support size one by one from a candidate set with :math:`O(s_{max})` complexity, # **golden-section** reduce the time complexity to :math:`O(\ln(s_{max}))`, giving a significant computational improvement. # # %% # Usage: golden-section # ^^^^^^^^^^^^^^^^^^^^^ # In ``abess`` package, golden-section technique can be easily formed like: model = LinearRegression(path_type='gs', s_min=0, s_max=20) model.fit(data.x, data.y) print("real coef:\n", np.nonzero(data.coef_)[0]) print("predicted coef:\n", np.nonzero(model.coef_)[0]) # %% # # where ``path_type = 'gs'`` means using golden-section rather than search the support size one-by-one. # ``s_min`` and ``s_max`` indicates the left and right bound of range of the support size. # Note that in golden-section searching, we should not give ``support_size``, which is only useful for sequential strategy. # # The output of golden-section strategy suggests the optimal model size is accurately detected. # # %% # Golden-section v.s. Sequential-searching: runtime comparison # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # In this part, we perform a runtime comparison experiment to demonstrate the speed gain brought by golden-section. # t1 = time() model = LinearRegression(support_size=range(21)) model.fit(data.x, data.y) print("sequential time: ", time() - t1) t2 = time() model = LinearRegression(path_type='gs', s_min=0, s_max=20) model.fit(data.x, data.y) print("golden-section time: ", time() - t2) # %% # The golden-section runs much faster than sequential method. # The speed gain would be enlarged when the range of support size is larger. ############################################################################### # # The ``abess`` R package also supports golden-section. # For R tutorial, please view # https://abess-team.github.io/abess/articles/v09-fasterSetting.html. # # sphinx_gallery_thumbnail_path = 'Tutorial/figure/large-sample.png' #
3,940
33.570175
149
py
abess
abess-master/docs/Tutorial/4-computation-tips/plot_sparse_inputs.py
""" Sparse Inputs ============= We sometimes meet with problems where the :math:`N × p` input matrix :math:`X` is extremely sparse, i.e., many entries in :math:`X` have zero values. A notable example comes from document classification: aiming to assign classes to a document, making it easier to manage for publishers and news sites. The input variables for characterizing documents are generated from a so called "bag-of-words" model. In this model, each variable is scored for the presence of each of the words in the entire dictionary under consideration. Since most words are absent, the input variables for each document is mostly zero, and so the entire matrix is mostly zero. """ # %% # Example # ^^^^^^^ # We create a sparse matrix as our example: # from time import time from abess import LinearRegression from scipy.sparse import coo_matrix import numpy as np import matplotlib.pyplot as plt row = np.array([0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9]) col = np.array([0, 3, 1, 2, 4, 3, 5, 2, 3, 1, 5, 2]) data = np.array([4, 5, 7, 9, 1, 23, 4, 5, 6, 8, 77, 100]) x = coo_matrix((data, (row, col))) # %% # And visualize the sparsity pattern via: plt.spy(x) plt.show() # %% # Usage: sparse matrix # ^^^^^^^^^^^^^^^^^^^^ # The sparse matrix can be directly used in ``abess`` pacakages. We just # need to set argument ``sparse_matrix = True``. Note that if the input # matrix is not sparse matrix, the program would automatically transfer it # into the sparse one, so this argument can also make some improvement. coef = np.array([1, 1, 1, 0, 0, 0]) y = x.dot(coef) model = LinearRegression() model.fit(x, y, sparse_matrix=True) print("real coef: \n", coef) print("pred coef: \n", model.coef_) # %% # Sparse v.s. 
Dense: runtime comparsion # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # We compare the runtime under a larger sparse data: # from scipy.sparse import rand from numpy.random import default_rng rng = default_rng(12345) x = rand(1000, 200, density=0.01, format='coo', random_state=rng) coef = np.repeat([1, 0], 100) y = x.dot(coef) t = time() model.fit(x.toarray(), y) print("dense matrix: ", time() - t) t = time() model.fit(x, y, sparse_matrix=True) print("sparse matrix: ", time() - t) # %% # From the comparison, we see that the time required by sparse matrix is smaller, # and this sould be more visible when the sparse imput matrix is large. # Hence, we suggest to assign a sparse matrix to ``abess`` when the input matrix have a lot of zero entries. # # The ``abess`` R package also supports sparse matrix. For R tutorial, # please view # https://abess-team.github.io/abess/articles/v09-fasterSetting.html
2,632
32.75641
159
py
abess
abess-master/docs/Tutorial/4-computation-tips/plot_specific_models.py
""" Specific Models =============== """ ########################################## # Introduction # ^^^^^^^^^^^^ # From the algorithm preseneted in “`ABESS algorithm: details <https://abess.readthedocs.io/en/latest/auto_gallery/1-glm/plot_a2_abess_algorithm_details.html>`__”, # one of the bottleneck in algorithm is the computation of forward and backward sacrifices, # which requires conducting iterative algorithms or frequently visiting :math:`p` variables. # To improve computational efficiency, # we designed specialize strategies for computing forward and backward sacrifices for different models. # The specialize strategies is roughly divide into two classes: (i) covariance update for (multivariate) linear model; # (ii) quasi Newton iteration for non-linear model (e.g., logistic regression). # We going to specify the two strategies as follows. # # Covariance update # ^^^^^^^^^^^^^^^^^ # Under linear model, the core bottleneck is computing sacrifices, e.g. the foreward sacrifices, # # .. math:: \zeta_{j}=\mathcal{L}_{n}\left(\hat{\boldsymbol{\beta}^{\mathcal{A}}}\right)-\mathcal{L}_{n}\left(\hat{\boldsymbol{\beta}}^{\mathcal{A}}+\hat{t}^{\{j\}}\right)=\frac{X_{j}^{\top} X_{j}}{2 n}\left(\frac{\hat{\boldsymbol d}_{j}}{X_{j}^{\top} X_{j} / n}\right)^{2}. # # where # :math:`\hat{t}=\arg \min _{t} \mathcal{L}_{n}\left(\hat{\boldsymbol{\beta}}^{\mathcal{A}}+t^{\{j\}}\right), \hat{\boldsymbol d}_{j}=X_{j}^{\top}(y-X \hat{\boldsymbol{\beta}}) / n`. # Intuitively, for :math:`j \in \mathcal{A}` (or # :math:`j \in \mathcal{I}` ), a large :math:`\xi_{j}` (or # :math:`\zeta_{j}`) implies the :math:`j` th variable is potentially # important. # # It would take a lot of time on calculating :math:`X^T_jy`, :math:`X^T_jX_j` and its inverse. # To speed up, it is actually no need to recompute these items at each splicing process. # Instead, they can be stored when first calculated, which is what we call # "covariance update". 
# # It is easy to enable this feature with an additional argument # ``covariance_update=True`` for linear model, for example: import numpy as np from time import time from abess.linear import LinearRegression from abess.datasets import make_glm_data np.random.seed(1) data = make_glm_data(n=10000, p=100, k=10, family='gaussian') model1 = LinearRegression() model2 = LinearRegression(covariance_update=True) t1 = time() model1.fit(data.x, data.y) t1 = time() - t1 t2 = time() model2.fit(data.x, data.y) t2 = time() - t2 print(f"No covariance update: {t1}") print(f"Covariance update: {t2}") print(f"Same answer? {(model1.coef_==model2.coef_).all()}") # %% # We can see that covariance update improve computation # when sample size :math:`n` is much larger than dimension :math:`p`. # # However, we have to point out that covariance update will cause higher memory usage, especially when :math:`p` is large. # So, we recommend to enable covariance update for fast computation when sample size is much larger than dimension # and dimension is moderate (:math:`p \leq 2000`). # %% # Quasi Newton iteration # ^^^^^^^^^^^^^^^^^^^^^^ # In the third step in `Algorithm 2 <https://abess.readthedocs.io/en/latest/auto_gallery/1-glm/plot_a2_abess_algorithm_details.html#algorithm-2-splicing-left-boldsymbol-beta-d-mathcal-a-mathcal-i-k-max-tau-s-right>`__ # , we need to solve a convex optimization problem: # # .. math:: # \tilde{\beta} = \arg\min_{\text{supp}(\beta) = \tilde{\mathcal{A}} } l_n(\beta ). # # # But generally, it has no closed-form solution, and has to be solved via iterative algorithm. # A natural method for solving this problem is Netwon method, i.e., # conduct the update: # # .. 
math:: # \beta_{\tilde{\mathcal{A}} }^{m+1} \leftarrow \boldsymbol \beta_{\tilde{\mathcal{A}} }^m - \Big( \left.\frac{\partial^2 l_n( \boldsymbol \beta )}{ (\partial \boldsymbol \beta_{\tilde{\mathcal{A}}} )^2 }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} \Big)^{-1} \Big( \left.\frac{\partial l_n( \boldsymbol \beta )}{ \partial \boldsymbol \beta_{\tilde{\mathcal{A}}} }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} \Big), # # # until :math:`\| \beta_{\tilde{\mathcal{A}} }^{m+1} - \beta_{\tilde{\mathcal{A}} }^{m}\|_2 \leq \epsilon` or :math:`m \geq k`, # where :math:`\epsilon, k` are two user-specific parameters. # Generally, setting :math:`\epsilon = 10^{-6}` and :math:`k = 80` achieves desirable estimation. # Generally, the inverse of second derivative is computationally intensive, and thus, # we approximate it with its diagonalized version. Then, the update formulate changes to: # # .. math:: # \beta_{\tilde{\mathcal{A}} }^{m+1} \leftarrow \boldsymbol \beta_{\tilde{\mathcal{A}} }^m - \rho D \Big( \left.\frac{\partial l_n( \boldsymbol \beta )}{ \partial \boldsymbol \beta_{\tilde{\mathcal{A}}} }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} \Big), # # # where :math:`D = \textup{diag}( (\left.\frac{\partial^2 l_n( \boldsymbol \beta )}{ (\partial \boldsymbol \beta_{\tilde{\mathcal{A}_{1}}} )^2 }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} )^{-1}, \ldots, (\left.\frac{\partial^2 l_n( \boldsymbol \beta )}{ (\partial \boldsymbol \beta_{\tilde{\mathcal{A}}_{|A|}} )^2 }\right|_{\boldsymbol \beta = \boldsymbol \beta^m} )^{-1})` # and :math:`\rho`` is step size. # Although using the approximation may increase the iteration time, # it avoids a large computational complexity when computing the matrix inversion. # Furthermore, we use a heuristic strategy to reduce the iteration time. 
# Observing that not every new support after exchanging the elements in active set and inactive set # may not reduce the loss function, # we can early stop the newton iteration on these support. # Specifically, support :math:`l_1 = L({\beta}^{m}), l_2 = L({\beta}^{m+1})`, # if :math:`l_1 - (k - m - 1) \times (l_2 - l_1)) > L - \tau`, # then we can expect the new support cannot lead to a better loss after :math:`k` iteration, # and hence, it is no need to conduct the remaining :math:`k - m - 1` times Newton update. # This heuristic strategy is motivated by the convergence rate of Netwon method is linear at least. # |image0| # # To enable this feature, you can simply give an additional argument ``approximate_Newton=True``. # The :math:`\epsilon` and :math:`k` we mentioned before, can be set with ``primary_model_fit_epsilon`` # and ``primary_model_fit_max_iter``, respectively. For example: import numpy as np from time import time from abess.linear import LogisticRegression from abess.datasets import make_glm_data np.random.seed(1) data = make_glm_data(n=1000, p=100, k=10, family='binomial') model1 = LogisticRegression() model2 = LogisticRegression(approximate_Newton=True, primary_model_fit_epsilon=1e-6, primary_model_fit_max_iter=10) t1 = time() model1.fit(data.x, data.y) t1 = time() - t1 t2 = time() model2.fit(data.x, data.y) t2 = time() - t2 print(f"No newton: {t1}") print(f"Newton: {t2}") print(f"Same answer? {(np.nonzero(model1.coef_)[0]==np.nonzero(model2.coef_)[0]).all()}") # %% # # The ``abess`` R package also supports covariance update and quasi Newton iteration. # For R tutorial, please view https://abess-team.github.io/abess/articles/v09-fasterSetting.html # # .. |image0| image:: ../../Tutorial/figure/convergence_rates.png
7,337
48.918367
445
py
abess
abess-master/docs/Tutorial/5-scikit-learn-connection/plot_1_scikit_learn.py
""" Work with scikit-learn ====================== """ # %% # ``abess`` is very easy to work with the famous package ``scikit-learn``, and here is an example. # We going to illustrate the integration of the ``abess`` with ``scikit-learn``’s pre-processing and model selection modules to # build a non-linear model for diagnosing malignant tumors. # Let start with importing necessary dependencies: import numpy as np from abess.datasets import make_glm_data from abess.linear import LinearRegression, LogisticRegression from sklearn.datasets import fetch_openml, load_breast_cancer from sklearn.pipeline import Pipeline, make_pipeline from sklearn.metrics import roc_auc_score, make_scorer, roc_curve, auc from sklearn.compose import ColumnTransformer from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, PolynomialFeatures, StandardScaler from sklearn.model_selection import GridSearchCV, TimeSeriesSplit, cross_val_score from sklearn.feature_selection import SelectFromModel #%% # Establish the process # --------------------- # Suppose we would like to extend the original variables to their # interactions, and then do ``LogisticRegression`` on them. This can be # record with ``Pipeline``: pipe = Pipeline([ ('poly', PolynomialFeatures(include_bias=False)), # without intercept ('standard', StandardScaler()), ('alogistic', LogisticRegression()) ]) #%% # Parameter grid # -------------- # We can give different parameters to model and let the program choose the # best. Here we should give parameters for ``PolynomialFeatures``, for # example: param_grid = { # whether the "self-combination" (e.g. X^2, X^3) exists 'poly__interaction_only': [True, False], 'poly__degree': [1, 2, 3] # the degree of polynomial } # %% # Note that the program would try all combinations of what we give, which means that there are :math:`2\times3=6` combinations of parameters will be tried. # # Criterion # --------- # After giving a grid of parameters, we should define what is a "better" # result. 
For example, the AUC (area under ROC curve) can be a criterion # and the larger, the better. scorer = make_scorer(roc_auc_score, greater_is_better=True) #%% # Cross Validation # ---------------- # For more accurate results, cross validation (CV) is often formed. #%% # Suppose that the data is independent and identically distributed (i.i.d.) # that all samples stem from the same generative process # and that the generative process has no memory of past generated samples. # A typical CV strategy is K-fold and a corresponding grid search procedure # can be made as follows: grid_search = GridSearchCV(pipe, param_grid, scoring=scorer, cv=5) #%% # However, if there exists correlation between observations (e.g. time-series data), # K-fold strategy is not appropriate any more. An alternative CV strategy is ``TimeSeriesSplit``. # It is a variation of K-fold which returns first K folds as train set and the # (K+1)-th fold as test set. #%% # The following example shows a combinatioon of ``abess`` # and ``TimeSeriesSplit`` applied to ``Bike_Sharing_Demand`` dataset and it returns the # cv score of a specific choice of ``support_size``. 
bike_sharing = fetch_openml('Bike_Sharing_Demand', version=2, as_frame=True) df = bike_sharing.frame X = df.drop('count', axis='columns') y = df['count'] / df['count'].max() ts_cv = TimeSeriesSplit( n_splits=5, gap=48, max_train_size=10000, test_size=1000, ) categorical_columns = ['weather', 'season', 'holiday', 'workingday',] one_hot_encoder = OneHotEncoder(handle_unknown='ignore') one_hot_abess_pipeline = make_pipeline( ColumnTransformer( transformers=[ ('categorical', one_hot_encoder, categorical_columns), ('one_hot_time', one_hot_encoder, ['hour', 'weekday', 'month']), ], remainder=MinMaxScaler(), ), LinearRegression(support_size=5), ) scores = cross_val_score(one_hot_abess_pipeline, X, y, cv=ts_cv) print("%0.2f score with a standard deviation of %0.2f" % (scores.mean(), scores.std())) #%% # Model fitting # ------------- # Eveything is prepared now. We can simply load the data and put it into # ``grid_search``: X, y = load_breast_cancer(return_X_y=True) grid_search.fit(X, y) print([grid_search.best_score_, grid_search.best_params_]) # %% # The output of the code reports the information of the polynomial features for the selected model among candidates, # and its corresponding area under the curve (AUC), which is over 0.97, # indicating the selected model would have an admirable contribution in practice. # # Moreover, the best choice of parameter combination is shown above: 2 degree with "self-combination", # implying the inclusion of the pairwise interactions between any two # features can lead to a better model generalization. 
# %% # Here is its ROC curve: import matplotlib.pyplot as plt proba = grid_search.predict_proba(X) fpr, tpr, _ = roc_curve(y, proba[:, 1]) plt.plot(fpr, tpr) plt.plot([0, 1], [0, 1], 'k--', label="ROC curve (area = %0.2f)" % auc(fpr, tpr)) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title("Receiver operating characteristic (ROC) curve") plt.legend(loc="lower right") plt.show() #%% # Feature selection # ------------------ #%% # Besides being used to make prediction explicitly, ``abess`` can be exploited to # select important features. # The following example shows how to perform abess-based feature selection # using ``sklearn.feature_selection.SelectFromModel``. #%% np.random.seed(0) n, p, k = 300, 1000, 5 data = make_glm_data(n=n, p=p, k=k, family='gaussian') X, y = data.x, data.y print('Shape of original data: ', X.shape) model = LinearRegression().fit(X, y) sfm = SelectFromModel(model, prefit=True) X_new = sfm.transform(X) print('Shape of transformed data: ', X_new.shape) # %% # sphinx_gallery_thumbnail_path = 'Tutorial/figure/scikit_learn.png'
5,954
32.268156
155
py
abess
abess-master/docs/Tutorial/5-scikit-learn-connection/plot_2_geomstats.py
""" Work with geomstats =================== """ # %% # The package `geomstats` is used for computations and statistics on nonlinear manifolds, # such as Hypersphere,Hyperbolic Space, Symmetric-Positive-Definite (SPD) Matrices Space and Skew-Symmetric Matrices Space. # `abess` also works well with the package `geomstats`. # Here is an example of using `abess` to do logistic regression of samples on Hypersphere, # and we will compare the precision score, the recall score and the running time with `abess` and with `scikit-learn`. import numpy as np import matplotlib.pyplot as plt import geomstats.backend as gs import geomstats.visualization as visualization from geomstats.learning.frechet_mean import FrechetMean from geomstats.geometry.hypersphere import Hypersphere from sklearn.model_selection import train_test_split from sklearn.metrics import precision_score, recall_score from sklearn.linear_model import LogisticRegression as sklLogisticRegression from abess import LogisticRegression import time import warnings warnings.filterwarnings("ignore") gs.random.seed(0) ############################################################################### # An Example # ---------- # Two sets of samples on Hypersphere in 3-dimensional Euclidean Space are created. # The sample points in `data0` are distributed around :math:`[-3/5, 0, 4/5]`, and the sample points in `data1` are distributed around :math:`[3/5, 0, 4/5]`. # The sample size of both is set to 100, and the precision of both is set to 5. # The two sets of samples are shown in the figure below. 
sphere = Hypersphere(dim=2) data0 = sphere.random_riemannian_normal(mean=np.array([-3/5, 0, 4/5]), n_samples=100, precision=5) data1 = sphere.random_riemannian_normal(mean=np.array([3/5, 0, 4/5]), n_samples=100, precision=5) fig = plt.figure(figsize=(8, 8)) ax = visualization.plot(data0, space="S2", color="black", alpha=0.7, label="data0 points") ax = visualization.plot(data1, space="S2", color="red", alpha=0.7, label="data1 points") ax.set_box_aspect([1, 1, 1]) ax.legend() plt.show() # %% # Then, we divide the data into `train_data` and `test_data`, and calculate the frechit mean of `train_data`, # which has the minimum sum of the squares of the distances along the geodesic to each sample point in `train_data`. # The `test_data`,the `train_data` and the frechit mean are shown in the figure below. labels = np.concatenate((np.zeros(data0.shape[0]),np.ones(data1.shape[0]))) data = np.concatenate((data0,data1)) train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=0.33, random_state=0) mean = FrechetMean(metric=sphere.metric) mean.fit(train_data) mean_estimate = mean.estimate_ fig = plt.figure(figsize=(8, 8)) ax = visualization.plot(train_data, space="S2", color="black", alpha=0.5, label="train data") ax = visualization.plot(test_data, space="S2", color="brown", alpha=0.5, label="test data") ax = visualization.plot(mean_estimate, space="S2", color="blue", s=100, label="frechet mean") ax.set_box_aspect([1, 1, 1]) ax.legend() plt.show() # %% # Next, do the logarithm map for all sample points from the frechit mean. # That is, map each sample point to which point on the tangential of the geodesic (from the frechit mean to the sample point) # at the frechit mean and has the distance to the frechit that equals to the length of the geodesic. 
log_train_data = sphere.metric.log(train_data, mean_estimate) log_test_data = sphere.metric.log(test_data, mean_estimate) # %% # The following figure shows the logarithm mapping of `train_data[5]` from the frechit mean. geodesic = sphere.metric.geodesic(mean_estimate, end_point=train_data[5]) points_on_geodesic = geodesic(gs.linspace(0.0, 1.0, 30)) fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(111, projection="3d") ax = visualization.plot(mean_estimate, space="S2", color="blue", s=100, label="frechet mean") ax = visualization.plot(train_data[5], space="S2", color="red", s=100, label="train_data[5]") ax = visualization.plot(points_on_geodesic, ax=ax, space="S2", color="black", alpha=0.5, label="Geodesic") arrow = visualization.Arrow3D(mean_estimate, vector=log_train_data[5]) arrow.draw(ax, color="black") ax.legend(); plt.show() # %% # After that, the samples are naturally distributed on a linear area. # Then, some common analysis methods can be used to analyze this set of data, such as LogisticRegression from `abess`. model = LogisticRegression(support_size= range(0,4)) model.fit(log_train_data, train_labels) fitted_labels = model.predict(log_test_data) print('Used variables\' index:', np.nonzero(model.coef_ != 0)[0]) print('accuracy:',sum((fitted_labels - test_labels + 1) % 2)/test_data.shape[0]) # %% # The result shows that the only variables' index it used is :math:`[0]`. # When constructing the samples, the means of the two sets are only different in the 0th direction. # It shows that `abess` correctly identifies the most relevant variable for classification. ############################################################################### # Comparison # ---------- # Here is the comparison of the precision score and the recall score with `abess` and `scikit-learn`, and # the comparison of the running time with `abess` and `scikit-learn`. # # We loop 50 times. # At each time, two sets of samples on Hypersphere in 10-dimensional Euclidean Space are created. 
# The sample points in `data0` are distributed around :math:`[1 / 3, 0, 2 / 3, 0, 2 / 3, 0, 0, 0, 0, 0]`, and # the sample points in `data1` are distributed around :math:`[0, 0, 2 / 3, 0, 2 / 3, 0, 0, 0, 0, 1 / 3]`. # The sample size of both is set to 200, and the precision of both is set to 5. m = 50 # cycles n_sam = 200 s = 10 pre = 5 sphere = Hypersphere(dim=s - 1) labels = np.concatenate((np.zeros(n_sam), np.ones(n_sam))) abess_precision_score = np.zeros(m) skl_precision_score = np.zeros(m) abess_recall_score = np.zeros(m) skl_recall_score = np.zeros(m) abess_geo_time = np.zeros(m) skl_geo_time = np.zeros(m) for i in range(m): data0 = sphere.random_riemannian_normal(mean=np.array([1 / 3, 0, 2 / 3, 0, 2 / 3, 0, 0, 0, 0, 0]), n_samples=n_sam, precision=pre) data1 = sphere.random_riemannian_normal(mean=np.array([0, 0, 2 / 3, 0, 2 / 3, 0, 0, 0, 0, 1 / 3]), n_samples=n_sam, precision=pre) data = np.concatenate((data0, data1)) train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=0.33, random_state=0) mean = FrechetMean(metric=sphere.metric) mean.fit(train_data) mean_estimate = mean.estimate_ log_train_data = sphere.metric.log(train_data, mean_estimate) log_test_data = sphere.metric.log(test_data, mean_estimate) start = time.time() abess_geo_model = LogisticRegression(support_size=range(0, s + 1)).fit(log_train_data, train_labels) abess_geo_fitted_labels = abess_geo_model.predict(log_test_data) end = time.time() abess_geo_time[i] = end - start abess_precision_score[i] = precision_score(test_labels, abess_geo_fitted_labels, average='micro') abess_recall_score[i] = recall_score(test_labels, abess_geo_fitted_labels, average='micro') start = time.time() skl_geo_model = sklLogisticRegression().fit(X=log_train_data, y=train_labels) skl_geo_fitted_labels = skl_geo_model.predict(log_test_data) end = time.time() skl_geo_time[i] = end - start skl_precision_score[i] = precision_score(test_labels, skl_geo_fitted_labels, average='micro') 
skl_recall_score[i] = recall_score(test_labels, skl_geo_fitted_labels, average='micro') # %% # The following figures show the precision score and the recall score with `abess` or `scikit-learn`. fig = plt.figure(figsize=(15,5)) ax1 = fig.add_subplot(121) ax1.boxplot([abess_precision_score, skl_precision_score], patch_artist='Patch', labels = ['abess', 'scikit-learn'], boxprops = {'color':'black','facecolor':'yellow'} ) ax1.set_title('precision score with abess or scikit-learn') ax1.set_ylabel('precision score') ax2 = fig.add_subplot(122) ax2.boxplot([abess_recall_score, skl_recall_score], patch_artist='Patch', labels = ['abess', 'scikit-learn'], boxprops = {'color':'black','facecolor':'yellow'} ) ax2.set_title('recall score with abess or scikit-learn') ax2.set_ylabel('recall score') plt.show() # %% # The following figure shows the running time with `abess` or `scikit-learn`. abess_geo_time_mean = np.mean(abess_geo_time) skl_geo_time_mean = np.mean(skl_geo_time) abess_geo_time_std = np.std(abess_geo_time) skl_geo_time_std = np.std(skl_geo_time) meth = ['abess', 'scikit-learn'] x_pos = np.arange(len(meth)) CTEs = [abess_geo_time_mean, skl_geo_time_mean] error = [abess_geo_time_std, skl_geo_time_std] fig = plt.figure(figsize=(8,5)) ax = fig.add_subplot(111) ax.bar(x_pos, CTEs, yerr=error, align='center', alpha=0.5, ecolor='black', capsize=10) ax.set_ylabel('running time') ax.set_xticks(x_pos) ax.set_xticklabels(meth) ax.set_title('running time with abess or scikit-learn') ax.yaxis.grid(True) plt.show() # %% # We can find that the precision score and the recall score with `abess` are generally higher than those without `abess`. # And the running time with `abess` is only slightly slower than that without `abess`. # %% # sphinx_gallery_thumbnail_path = 'Tutorial/figure/geomstats.png'
9,511
43.037037
157
py
abess
abess-master/docs/Tutorial/5-scikit-learn-connection/plot_3_double_machine_learning.py
""" ================================ Work with DoubleML ================================ Double machine learning [1]_ offer a debiased way for estimating low-dimensional parameter of interest in the presence of high-dimensional nuisance. Many machine learning methods can be used to estimate the nuisance parameters, such as random forests, lasso or post-lasso, neural nets, boosted regression trees, and so on. The Python package ``DoubleML`` [2]_ provide an implementation of the double machine learning. It's built on top of scikit-learn and is an excellent package. The object-oriented implementation of ``DoubleML`` is very flexible, in particular functionalities to estimate double machine learning models and to perform statistical inference via the methods fit, bootstrap, confint, p_adjust and tune. """ ############################################################################### # # In fact, ``abess`` [3]_ also works well with the package ``DoubleML``. Here is an example of using ``abess`` to solve such # a problem, and we will compare it to the lasso regression. import numpy as np from sklearn.base import clone from sklearn.linear_model import LassoCV from abess.linear import LinearRegression from doubleml import DoubleMLPLR import matplotlib.pyplot as plt import warnings # ignore warnings warnings.filterwarnings('ignore') import time ############################################################################### # Partially linear regression (PLR) model # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # PLR models take the form # # .. math:: # Y=D \theta_{0}+g_{0}(X)+U, & \quad\mathbb{E}(U \mid X, D)=0,\\ # D=m_{0}(X)+V, & \quad\mathbb{E}(V \mid X)=0, # # where :math:`Y` is the outcome variable, :math:`D` is the policy/treatment variable. :math:`\theta_0` is the main # regression coefficient that we would like to infer, which has the interpretation of the treatment effect parameter. 
# The high-dimensional vector :math:`X=(X_1,\dots, X_p)` consists of other confounding covariates, and :math:`U` and # :math:`V` are stochastic errors. Usually, :math:`p` is not vanishingly small relative to the sample size, it's # difficult to estimate the nuisance parameters :math:`\eta_0 = (m_0, g_0)`. ``abess`` aims to solve general best subset # selection problem. In PLR models, ``abess`` is applicable when nuisance parameters are sparse. Here, we are going to # use ``abess`` to estimate the nuisance parameters, then combine with ``DoubleML`` to estimate the treatment effect # parameter. ############################################################################### # Data # """" # We simulate the data from a PLR model, which both :math:`m_0` and :math:`g_0` are low-dimensional linear combinations # of :math:`X`, and we save the data as ``DoubleMLData`` class. from doubleml import DoubleMLData np.random.seed(1234) n_obs = 200 n_vars = 600 theta = 3 X = np.random.normal(size=(n_obs, n_vars)) d = np.dot(X[:, :3], np.array([5]*3)) + np.random.standard_normal(size=(n_obs,)) y = theta * d + np.dot(X[:, :3], np.array([5]*3)) + np.random.standard_normal(size=(n_obs,)) dml_data_sim = DoubleMLData.from_arrays(X, y, d) ############################################################################### # Model fitting with ``abess`` # """""""""""""""""""""""""""" # Based on the simulated data, now we are going to illustrate how to integrate the ``abess`` with ``DoubleML``. To # estimate the PLR model with the double machine learning algorithm, first we need to choose a learner to estimate the # nuisance parameters :math:`\eta_0 = (m_0, g_0)`. Considering the sparsity of the data, we can use the adaptive best # subset selection model. Then fitting the model to learn the average treatment effct parameter :math:`\theta_0`. 
abess = LinearRegression(cv = 5) # abess learner ml_g_abess = clone(abess) ml_m_abess = clone(abess) obj_dml_plr_abess = DoubleMLPLR(dml_data_sim, ml_g_abess, ml_m_abess) # model fitting obj_dml_plr_abess.fit(); print("thetahat:", obj_dml_plr_abess.coef) print("sd:", obj_dml_plr_abess.se) # %% # The estimated value is close to the true parameter, and the standard error is very small. ``abess`` integrates with # ``DoubleML`` easily, and works well for estimating the nuisance parameter. ############################################################################### # Comparison with lasso # ^^^^^^^^^^^^^^^^^^^^^ # The lasso regression is a shrinkage and variable selection method for regression models, which can also be used in # high-dimensional setting. Here, we compare the abess regression with the lasso regression at different variable # dimensions. # %% # The following figures show the absolute bias of the abess learner and the lasso learner. lasso = LassoCV(cv = 5) # lasso learner ml_g_lasso = clone(lasso) ml_m_lasso = clone(lasso) M = 15 # repeate times n_obs = 200 n_vars_range = range(100,1100,300) # different dimensions of confounding covariates theta_lasso = np.zeros(len(n_vars_range)*M) theta_abess = np.zeros(len(n_vars_range)*M) time_lasso = np.zeros(len(n_vars_range)*M) time_abess = np.zeros(len(n_vars_range)*M) j = 0 for n_vars in n_vars_range: for i in range(M): np.random.seed(i) # simulated data: three true variables X = np.random.normal(size=(n_obs, n_vars)) d = np.dot(X[:, :3], np.array([5]*3)) + np.random.standard_normal(size=(n_obs,)) y = theta * d + np.dot(X[:, :3], np.array([5]*3)) + np.random.standard_normal(size=(n_obs,)) dml_data_sim = DoubleMLData.from_arrays(X, y, d) # Estimate double/debiased machine learning models starttime = time.time() obj_dml_plr_lasso = DoubleMLPLR(dml_data_sim, ml_g_lasso, ml_m_lasso) obj_dml_plr_lasso.fit() endtime = time.time() time_lasso[j*M + i] = endtime - starttime theta_lasso[j*M + i] = obj_dml_plr_lasso.coef 
starttime = time.time() obj_dml_plr_abess = DoubleMLPLR(dml_data_sim, ml_g_abess, ml_m_abess) obj_dml_plr_abess.fit() endtime = time.time() time_abess[j*M + i] = endtime - starttime theta_abess[j*M + i] = obj_dml_plr_abess.coef j = j + 1 # absolute bias abs_bias1 = [abs(theta_lasso-theta)[:M],abs(theta_abess-theta)[:M]] abs_bias2 = [abs(theta_lasso-theta)[M:2*M],abs(theta_abess-theta)[M:2*M]] abs_bias3 = [abs(theta_lasso-theta)[2*M:3*M],abs(theta_abess-theta)[2*M:3*M]] abs_bias4 = [abs(theta_lasso-theta)[3*M:4*M],abs(theta_abess-theta)[3*M:4*M]] labels = ["lasso", "abess"] fig, ([ax1, ax2], [ax3, ax4]) = plt.subplots(nrows=2, ncols=2, figsize=(10,5)) bplot1 = ax1.boxplot(abs_bias1, vert=True, patch_artist=True, labels=labels) ax1.set_title("p = 100") bplot2 = ax2.boxplot(abs_bias2, vert=True, patch_artist=True, labels=labels) ax2.set_title("p = 400") bplot3 = ax3.boxplot(abs_bias3, vert=True, patch_artist=True, labels=labels) ax3.set_title("p = 700") bplot4 = ax4.boxplot(abs_bias4, vert=True, patch_artist=True, labels=labels) ax4.set_title("p = 1000") colors = ["lightblue", "orange"] for bplot in (bplot1, bplot2, bplot3, bplot4): for patch, color in zip(bplot["boxes"], colors): patch.set_facecolor(color) for ax in [ax1, ax2, ax3, ax4]: ax.yaxis.grid(True) ax.set_ylabel("absolute bias") plt.show(); # %% # The following figure shows the running time of the abess learner and the lasso learner. 
plt.plot(np.repeat(n_vars_range, M),time_lasso, "o", color = "lightblue", label="lasso", markersize=3); plt.plot(np.repeat(n_vars_range, M),time_abess, "o", color = "orange", label="abess", markersize=3); slope_lasso, intercept_lasso = np.polyfit(np.repeat(n_vars_range, M),time_lasso, 1) slope_abess, intercept_abess = np.polyfit(np.repeat(n_vars_range, M),time_abess, 1) plt.axline(xy1=(0,intercept_lasso), slope = slope_lasso, color = "lightblue", lw = 2) plt.axline(xy1=(0,intercept_abess), slope = slope_abess, color = "orange", lw = 2) plt.grid() plt.xlabel("number of variables") plt.ylabel("running time") plt.legend(loc="upper left") # %% # At each dimension, we repeat the double machine learning procedure 15 times for each of the two learners. As can be # seen from the above figures, the parameters estimated by both learners are very close to the true parameter # :math:`\theta_0`. But the running time of abess learner is much shorter than lasso. Besides, in high-dimensional # situations, the mean absolute bias of abess learner regression is relatively smaller. # %% # .. rubric:: References # .. [1] Chernozhukov V, Chetverikov D, Demirer M, et al. Double/debiased machine learning for treatment and structural parameters[M]. Oxford University Press Oxford, UK, 2018. # .. [2] Bach P, Chernozhukov V, Kurz M S, et al. Doubleml-an object-oriented implementation of double machine learning in python[J]. Journal of Machine Learning Research, 2022, 23(53): 1-6. # .. [3] Zhu J, Hu L, Huang J, et al. abess: A fast best subset selection library in python and r[J]. arXiv preprint arXiv:2110.09697, 2021. # # %% # sphinx_gallery_thumbnail_path = 'Tutorial/figure/doubleml.png'
9,137
46.103093
190
py
abess
abess-master/docs/Tutorial/5-scikit-learn-connection/plot_4_pyts.py
""" ============== Work with pyts ============== ``pyts`` is a Python package dedicated to time series classification. It aims to make time series classification easily accessible by providing preprocessing and utility tools, and implementations of several time series classification algorithms. In this example, we will mainly focus on the shapelets-based algorithms. """ # %% # Shapelets learning is a new primitive in time series classification. Shapelets are defined as subsequences of time series # that are in some sense maximally representative of a class. Informally, in a binary classification task, # a shapelet is discriminant if it is present in most series of one class and absent from series of the other class. # ``ShapeletTransform`` is a powerful method implemented by ``pyts`` to perform shapelets-based feature transformation. # Actually, ``abess`` also works well with shapelets-based methods. This example shows how to effectively select # discriminant shapelets with ``abess``. # %% import numpy as np import matplotlib.pyplot as plt import time import warnings warnings.filterwarnings('ignore') from abess.linear import LogisticRegression from sklearn.svm import LinearSVC from sklearn.pipeline import make_pipeline from pyts.transformation import ShapeletTransform from pyts.datasets import load_coffee # %% # Data # """" # In this example, we use the buint-in coffee dataset in ``pyts`` to perform shapelets learning. It has two classes, # 0 and 1. So, this is a binary classification task. Both train dataset and test dataset have 28 time series and the # dimension of each time series is 286. We plot the time series in the train dataset. 
# %% X_train, X_test, y_train, y_test = load_coffee(return_X_y=True) print("X_train shape: ", X_train.shape) print("X_test shape: ", X_test.shape) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5), dpi=600) ax1.plot(X_train[y_train == 0].T) ax1.set_title("Class = 0", fontsize=15) ax1.set_xlabel("Time", fontsize=15) ax1.set_ylabel("Value", fontsize=15) ax2.plot(X_train[y_train == 1].T) ax2.set_title("Class = 1", fontsize=15) ax2.set_xlabel("Time", fontsize=15) ax2.set_ylabel("Value", fontsize=15) fig.tight_layout() plt.show() # %% # Learning shapelets with ``abess`` # """"""""""""""""""""""""""""""""" # To select discriminant shapelets, we first collect all subsequences with predefined length and step as the candidates. # Then we transform the original time series by computing the distance between them to each subsequence. Therefore, # the original time series are transformed to some ultra high dimensional vectors. Finally, we perform binary # classification and shapelets selection simultaneously with ``LogisticRegression`` implemented by ``abess``. 
# %%


class abessShapelet(object):
    """Shapelet-based binary classifier built on abess logistic regression.

    Candidate shapelets are all subsequences of the training series with a
    fixed length and sliding step.  Each series is represented by its minimal
    squared Euclidean distance to every candidate, and
    ``abess.linear.LogisticRegression`` selects the discriminant shapelets
    while fitting the classifier.
    """

    def __init__(self, X_train, X_test, y_train, y_test,
                 len_shapelet=None, step=None):
        """Collect candidate shapelets from the training series.

        Parameters
        ----------
        X_train, X_test : ndarray of shape (n_samples, n_timestamps)
            Training / test time series.
        y_train, y_test : ndarray of shape (n_samples,)
            Binary class labels.
        len_shapelet : int, optional
            Length of each candidate shapelet; defaults to a quarter of the
            series length.
        step : int, optional
            Sliding step between consecutive candidates; defaults to half
            the shapelet length, but never less than 1.
        """
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.n, self.p = X_train.shape
        # fixed: compare to None with "is", not "==" (PEP 8)
        if len_shapelet is None:
            len_shapelet = self.p // 4
        if step is None:
            # fixed: guard against step == 0 (e.g. len_shapelet == 1), which
            # previously raised ZeroDivisionError in the num_each computation
            step = max(1, len_shapelet // 2)
        num_each = 1 + (self.p - len_shapelet) // step
        self.shapelets = []
        for i in range(self.n):
            for j in range(num_each):
                col = j * step
                self.shapelets.append(
                    self.X_train[i, col:(col + len_shapelet)])
        self.shapelets = np.array(self.shapelets)

    def distant(self, x, y):
        """Return the minimal squared Euclidean distance between the shorter
        of ``x``/``y`` and every equally long window of the longer one."""
        assert x.ndim == 1 and y.ndim == 1
        # slide the shorter sequence over the longer one (symmetric in x, y)
        short, long = (x, y) if len(x) <= len(y) else (y, x)
        n_s, n_l = len(short), len(long)
        dist = np.zeros(n_l - n_s + 1)
        for i in range(n_l - n_s + 1):
            window = long[i:i + n_s]
            dist[i] = np.sum((short - window) ** 2)
        return dist.min()

    def featureTransform(self, X, shapelets, index=None):
        """Map each series in ``X`` to its distances to ``shapelets``.

        Only the columns listed in ``index`` are computed (all of them by
        default); the rest stay zero, which is harmless at prediction time
        because the fitted model has zero coefficients there.
        """
        if index is None:
            index = np.arange(shapelets.shape[0])
        n, p = X.shape
        num_shapelets, k = shapelets.shape
        new_feature = np.zeros((n, num_shapelets))
        for i in range(n):
            for j in index:
                new_feature[i, j] = self.distant(X[i], shapelets[j])
        return new_feature

    def fit_predict(self, size=None):
        """Fit abess logistic regression on the transformed training data
        and return class predictions for the test set.

        Parameters
        ----------
        size : int or array-like, optional
            ``support_size`` passed to ``LogisticRegression``, i.e. the
            number of shapelets to keep.
        """
        X_train_new = self.featureTransform(self.X_train, self.shapelets)
        model = LogisticRegression(support_size=size)
        model.fit(X_train_new, self.y_train)
        # indices of the selected (non-zero coefficient) shapelets
        self.index = np.nonzero(model.coef_)[0]
        # only the selected columns are needed for prediction
        X_test_new = self.featureTransform(
            self.X_test, self.shapelets, self.index)
        y_pred = model.predict(X_test_new)
        return y_pred


# %%
# In the following, we perform shapelets learning using ``abessShapelet``. We print the performance and execution time.
# %% t1 = time.time() aShapelet = abessShapelet(X_train, X_test, y_train, y_test, len_shapelet=75) y_pred = aShapelet.fit_predict(size=2) score_abess = (y_pred == y_test).mean() t2 = time.time() print("score_abess: ", round(score_abess, 2)) print("time_abess : {}s".format(round(t2 - t1, 2))) # %% # Learning shapelets with ``pyts`` # """""""""""""""""""""""""""""""" # We compare our method with the one implemented in ``pyts``, which is a two-step procedure. First, it selects discriminant # shapelets based on mutual information. Then, a support vector machine is applied to perform binary classification with # transformed time series based on those selected shapelets. Analogously, we print the performance and execution time. # %% t3 = time.time() shapelet = ShapeletTransform(n_shapelets=2, window_sizes=[75], sort=True) svc = LinearSVC() clf = make_pipeline(shapelet, svc) clf.fit(X_train, y_train) score_pyts = clf.score(X_test, y_test) t4 = time.time() print("score_pyts: ", round(score_pyts, 2)) print("time_pyts : {}s".format(round(t4 - t3, 2))) # %% # It can be seen from the above results that the linear classifier ``abessShapelet`` obtains the same performance with # the method implemented by ``pyts`` while the running time is much shorter. # %% # Plot: learned shapelets # """"""""""""""""""""""" # The following figure shows the discriminant shapelets selected by these two methods. 
# %% fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5), dpi=600) ax1.plot(aShapelet.shapelets[aShapelet.index][0], label="Shapelet_1") ax1.plot(aShapelet.shapelets[aShapelet.index][1], label="Shapelet_2") ax1.legend() ax1.set_title("abess", fontsize=15) ax1.set_xlabel("Time", fontsize=15) ax1.set_ylabel("Value", fontsize=15) ax2.plot(shapelet.shapelets_[0], label="Shapelet_1") ax2.plot(shapelet.shapelets_[1], label="Shapelet_2") ax2.legend() ax2.set_title("pyts", fontsize=15) ax2.set_xlabel("Time", fontsize=15) ax2.set_ylabel("Value", fontsize=15) plt.suptitle("Learned Shapelets") plt.show() # %% # sphinx_gallery_thumbnail_path = 'Tutorial/figure/pyts.png'
7,074
37.873626
123
py
abess
abess-master/docs/Tutorial/5-scikit-learn-connection/plot_5_dowhy.py
""" ================================ Work with DoWhy ================================ ``DoWhy`` is a Python library for causal inference that supports explicit modeling and testing of causal assumptions. In this section, we will use ``abess`` to cope with high-dimensional mediation analysis problem, which is a popular topic in the research of causal inference. High-Dimensional mediation model is crucial in biomedical research studies. People always want to know by what mechanism the genes with differential expression, distinct genotypes or various epigeneticmarkers affect the outcome or phenotype. Such a mechanistic process is what a mediation analysis aims to characterize (Huang and Pan, 2016 [2]_ ). """ ############################################################################### # # A typical example of the high-dimensional mediators is high-dimensional DNA methylation markers. This model can be represented # by the following figure (Zhang et al. 2016 [1]_ ), # # .. image:: ../../Tutorial/figure/med_model.png # # where :math:`X` is treatment (exposure), :math:`Y` is outcome, and :math:`M_k,k=1,...,p` are (potential) mediators. Moreover, # :math:`\alpha=\left(\alpha_{1}, \cdots, \alpha_{p}\right)^{\mathrm{T}}` denotes the parameters relating :math:`X` to mediators, # and :math:`\beta=\left(\beta_{1}, \cdots, \beta_{p}\right)^{\mathrm{T}}` denotes the parameters relating the mediators to # the outcome :math:`Y`. For the latter relation, we would consider both continuous outcomes (linear regression) and # binary outcomes (logistic regression). These two models can be implemented by ``abess`` directly. # # For instance, if the outcome is continuous, then we assume the model take the form # # .. 
math:: # M_{k}=c_{k}+\alpha_{k} X+e_{k}, \quad k=1, \cdots, p \\ # Y=c+\gamma X+\beta_{1} M_{1}+\cdots+\beta_{p} M_{p}+\epsilon, # # Among all the possible :math:`M_p` (:math:`p` may be large), only few of them are real mediators, which means that both :math:`\alpha_k` # and :math:`\beta_k` should be non-zero. Then, an indirect path :math:`X \rightarrow M_k \rightarrow Y` can be built. Next, we will # show that by directly using ABESS in a naive form, we can get a good result. ############################################################################### # Continuous Outcome # """""""""""""""""" # We will follow the simulation settings and the data generating process in the R document of the R package HIMA # (Zhang et al. 2016). :math:`X` is generated from :math:`N(0,1.5)`, the first 8 elements of :math:`\beta\left(\beta_{k}, k=1, \cdots, 8\right)` # are :math:`(0.55,0.6,0.65,0.7,0.8,0.8,0,0)^{\mathrm{T}}`, and the first 8 elements of :math:`a\left(\alpha_{k}, k=1, \cdots, 8\right)` # are :math:`(0.45,0.5,0.6,0.7,0,0,0.5,0.5)^{\mathrm{T}}`. The rest of :math:`\beta` and :math:`\alpha` are all 0 . Let :math:`c=1, \gamma=0.5. c_{k}` # is chosen as a random number from :math:`U(0,2) . e_{k}` and :math:`\epsilon` are generated from :math:`N(0,1.2)` and :math:`N(0,1)`, respectively. 
#

import numpy as np
import pandas as pd
import random
import abess
import math
from dowhy import CausalModel
import dowhy.datasets
import dowhy.causal_estimators.linear_regression_estimator
import warnings
warnings.filterwarnings('ignore')

# The data-generating function:
def simhima (n,p,alpha,beta,seed,binary=False):
    """Simulate data from a high-dimensional mediation model.

    Parameters
    ----------
    n : int
        Sample size.
    p : int
        Number of candidate mediators.
    alpha : ndarray of shape (p,)
        Coefficients of the treatment X on each mediator M_k.
    beta : ndarray of shape (p,)
        Coefficients of each mediator M_k on the outcome Y.
    seed : int
        Random seed, so each replication is reproducible.
    binary : bool, default False
        If True, draw a binary outcome from the logistic model instead of
        returning the continuous outcome.

    Returns
    -------
    dict with keys "Y" (outcome), "M" (n x p mediators), "X" (treatment).
    """
    # BUG FIX: every draw below uses NumPy's global RNG, so the seed must be
    # set via np.random.seed.  The previous random.seed(seed) seeded the
    # stdlib RNG instead, and the ``seed`` argument was silently ignored.
    np.random.seed(seed)
    ck = np.random.uniform(0,2,size=p)
    M = np.zeros((n,p))
    X = np.random.normal(0,1.5,size=n)
    for i in range(n):
        e = np.random.normal(0,1.2,size=p)
        M[i,:] = ck + X[i]*alpha + e
    X = X.reshape(n,1)
    XM = np.concatenate((X,M),axis=1)
    # the leading 0.5 is the direct effect of X on Y (gamma)
    B = np.concatenate((np.array([0.5]),beta),axis=0)
    E = np.random.normal(0,1,size=n)
    Y = 0.5 + np.matmul(XM,B) + E
    if binary:
        Y = np.random.binomial(1,1/(1+np.exp(Y)),size=n)
    return {"Y":Y, "M":M, "X":X}

n = 300
p = 200
alpha = np.zeros(p)
beta = np.zeros(p)
# indices 0-3 are true mediators (alpha_k and beta_k both non-zero);
# indices 4-7 are decoys with only one of the two coefficients non-zero
alpha[0:8] = (0.45,0.5,0.6,0.7,0,0,0.5,0.5)
beta[0:8] = (0.55,0.6,0.65,0.7,0.8,0.8,0,0)
simdat = simhima(n,p,alpha,beta,seed=12345)

# %%
# Now, let's examine again our settings. There are altogether :math:`p=200` possible mediators, but only few of them are the
# true mediators that we really want to find out. A true mediator must have both non-zero :math:`\alpha` and :math:`\beta`, so only
# the first four mediators satisfy this condition (indices 0,1,2,3). We also set up four false mediators that are easily
# confused (indices 4,5,6,7), which have either non-zero :math:`\alpha` or :math:`\beta`, and should avoid being selected by our method.
#
# The whole structure can be divided into left and right parts. The left part is about the paths :math:`X \rightarrow M_i, i=1,2,...p`,
# and the right part is about the paths :math:`M_i \rightarrow Y, i=1,2,...p`. A natural idea is to apply ``abess`` to these two subproblems
# separately. 
Notice: the right part is in the same form as the problem ``abess`` wants to solve: one dependent variable and multiple # possible independent variables, but the left part is opposite: we have one independent variable and multiple possible dependent # variables. In this case, continuing to naively use ``abess`` may lead to philosophical causal issues and cannot have good theoretical # guarantees, since we have to treat :math:`X` as an "dependent variable" and treat :math:`M_i` as "independent variables", which is contrary # to the interpretation in reality. However, this naive approach performs well in this task of selecting true mediators, and this # kind of idea has already been used in some existing methods, such as Coordinate-wise Mediation Filter (Van Kesteren and Oberski, 2019 [3]_ ). # Therefore, we will still use this kind of idea here, and the main task is to show the power of ``abess``. # # We will first apply BESS with a fixed support size to one of these subproblems, conducting a preliminary screening to # determine some candidate mediators, and then apply ABESS with an adaptive size to the second subproblem and decide the # final selected mediators. If we directly use ABESS in the first subproblem, the candidate set would be too small, and # make the ABESS in the second step meaningless (because the candidate set is no longer high-dimensional at this time), # which could induce a large drop of TPR (True Positive Rate). The support size used in the first step can be tuned, and # its size is preferably 2 to 4 times the number of true mediators. # # Now there is a problem of order. Since we have to run ``abess`` twice, should we do the left half or the right half first? # We've found that doing the left half first is almost always a better choice. 
The reason is as follows: if we do the right # half first, those false mediators that only have correlation coefficients with the left half will be easily selected because # there is an :math:`M_i \leftarrow X \rightarrow Y` path (note that from :math:`X` to :math:`Y` has not only indirect paths, but also a direct path!), # and once these false mediators are selected into the second step, they will be selected eventually because they have non-zero # coefficients in the left half, resulting in uncontrollable FDR. But doing the left half first won't have such a problem. model = abess.LinearRegression(support_size=10) model.fit(simdat["M"],simdat["X"]) ind = np.nonzero(model.coef_) print("estimated non-zero: ", ind) print("estimated coef: ", model.coef_[ind]) # %% # This the subproblem of left half, and we use a "support size=10" to demonstrate conveniently. These 10 mediators have # been selected in the first step and entered our candidate set for the second step. Recall that the true mediators we want # to find have index 0,1,2,3. They are all selected in the candidate set. model1 = abess.LinearRegression() model1.fit(simdat["M"].T[ind].T,simdat["Y"]) ind1 = np.nonzero(model1.coef_) print("estimated non-zero: ", ind[0][ind1]) recorded_index = ind[0][ind1] # %% # This is the second step, and we use an adaptive support size, which lead to a final selection: index 0,1,2,3. We've # perfectly accomplished the task of selecting real mediators. After this step, we can use the ``DoWhy`` library for # our data for further analysis. m_num = len(recorded_index) df = pd.DataFrame(simdat["M"].T[recorded_index].T, columns=["FD"+str(i) for i in range(m_num)]) df["y"] = simdat["Y"] df["v0"] = simdat["X"] df.head() # %% # In order to adapt to the naming convention of the ``DoWhy`` library, we renamed the above variables. v0 is treatment, # y is outcome, and FD0 to FD3 (short for Front Door) are mediators. 
data = dowhy.datasets.linear_dataset(0.5, num_common_causes=0, num_samples=300, num_instruments=0, num_effect_modifiers=0, num_treatments=1, num_frontdoor_variables=m_num, treatment_is_binary=False, outcome_is_binary=False) my_graph = data["gml_graph"][:-1] + ' edge[ source "v0" target "y"]]' model = CausalModel(df,"v0","y",my_graph, missing_nodes_as_confounders=True) model.view_model() # %% # ``DoWhy`` library can directly display the causal graph we built. Now we can do identification and estimation based # on this causal graph and the data we simulated with ``DoWhy``. For example, we are going to estimate the # natural indirect effect (NIE) of the first mediator :math:`M_0` (FD0). identified_estimand_nie = model.identify_effect(estimand_type="nonparametric-nie", proceed_when_unidentifiable=True) causal_estimate_nie = model.estimate_effect(identified_estimand_nie, method_name="mediation.two_stage_regression", confidence_intervals=False, test_significance=False, method_params = { 'first_stage_model': dowhy.causal_estimators.linear_regression_estimator.LinearRegressionEstimator, 'second_stage_model': dowhy.causal_estimators.linear_regression_estimator.LinearRegressionEstimator } ) print("The estimate of the natural indirect effect of the first mediator is ",causal_estimate_nie.value) # %% # Recall that we have :math:`\alpha_0=0.45` and :math:`\beta_0=0.55`, the true value of the natural indirect effect of the first mediator # is :math:`0.45 \times 0.55 = 0.2475`. Similarly, we can also get the estimated value of NIE # of other mediator variables, and also the natural direct effect (NDE). Since the linear regression model has a simple and known # form in our simulation, it's obvious that the accuracy of our estimates depends only on whether we choose the correct mediators. # Next, we would do 1000 replications and see the performance of `abess` on choosing mediators. 
recorded_index = ind[0][ind1] for i in range(999): simdat = simhima(n,p,alpha,beta,seed=i) model = abess.LinearRegression(support_size=10) model.fit(simdat["M"],simdat["X"]) ind = np.nonzero(model.coef_) model1 = abess.LinearRegression() model1.fit(simdat["M"].T[ind].T,simdat["Y"]) ind1 = np.nonzero(model1.coef_) recorded_index = np.concatenate((recorded_index,ind[0][ind1]),axis=0) mask = np.unique(recorded_index) tmp = [] for v in mask: tmp.append(np.sum(recorded_index==v)) np.vstack((mask,np.array(tmp))).T # %% # After doing 1000 replications of the process mentioned above, we can get this list. The left number in each row is the # index, and the right number is the times that this mediator been selected. We can find that: # # - The true mediators (indices 0-3) can always be selected during all the 1000 replications. # - The bewildering false mediators (indices 4-7) may be occasionally selected, but FDR can be controlled at a low level. # - It is almost impossible for other mediators (indices 8-199) to be selected by our method. # # Now, we can do the confusion matrix analysis, and output some commonly used metrics to measure our selection method. Positive = 4*1000 Negative = 196*1000 TP = np.sum(tmp[:4]) FP = np.sum(tmp[4:]) FN = Positive-TP TN = Negative-FP TPR = TP/Positive TNR = TN/Negative PPV = TP/(TP+FP) FDR = 1-PPV ACC = (TP+TN)/(Positive+Negative) F1 = 2*TP/(2*TP+FP+FN) print('TPR:',TPR,'\nTNR:',TNR,'\nFDR:',FDR,'\nPPV:',PPV,'\nACC:',ACC,'\nF1 score:',F1) ############################################################################### # Binary Outcome # """"""""""""""" # For binary outcome, we still follow the simulation settings of the R documentation of the R package HIMA. We increased # the sample size from 300 to 600, which is also a reasonable size. 
n = 600 p = 200 alpha = np.zeros(p) beta = np.zeros(p) alpha[0:8] = (0.45,0.5,0.6,0.7,0,0,0.5,0.5) beta[0:8] = (1.45,1.5,1.55,1.6,1.7,1.7,0,0) simdat = simhima(n,p,alpha,beta,seed=12345,binary=True) # %% # First step: model = abess.LinearRegression(support_size=10) model.fit(simdat["M"],simdat["X"]) ind = np.nonzero(model.coef_) print("estimated non-zero: ", ind) print("estimated coef: ", model.coef_[ind]) # %% # Second step: model1 = abess.LogisticRegression() model1.fit(simdat["M"].T[ind].T,simdat["Y"]) ind1 = np.nonzero(model1.coef_) print("estimated non-zero: ", ind[0][ind1]) recorded_index = ind[0][ind1] # %% # Again, we got a perfect result. recorded_index = ind[0][ind1] for i in range(999): simdat = simhima(n,p,alpha,beta,seed=i,binary=True) model = abess.LinearRegression(support_size=10) model.fit(simdat["M"],simdat["X"]) ind = np.nonzero(model.coef_) model1 = abess.LogisticRegression() model1.fit(simdat["M"].T[ind].T,simdat["Y"]) ind1 = np.nonzero(model1.coef_) recorded_index = np.concatenate((recorded_index,ind[0][ind1]),axis=0) mask = np.unique(recorded_index) tmp = [] for v in mask: tmp.append(np.sum(recorded_index==v)) np.vstack((mask,np.array(tmp))).T # %% # TPR has dropped significantly because problems with binary outcomes require a larger sample size than problems with # continuous outcomes. But we found that FDR was also well controlled. Positive = 4*1000 Negative = 196*1000 TP = np.sum(tmp[:4]) FP = np.sum(tmp[4:]) FN = Positive-TP TN = Negative-FP TPR = TP/Positive TNR = TN/Negative PPV = TP/(TP+FP) FDR = 1-PPV ACC = (TP+TN)/(Positive+Negative) F1 = 2*TP/(2*TP+FP+FN) print('TPR:',TPR,'\nTNR:',TNR,'\nFDR:',FDR,'\nPPV:',PPV,'\nACC:',ACC,'\nF1 score:',F1) # %% # In a word, by simply using ``abess`` in high-dimensional mediation analysis problem, we can get good results both # under continuous and binary outcome settings. # %% # .. rubric:: References # .. 
[1] Zhang H, Zheng Y, Zhang Z, Gao T, Joyce B, Yoon G, Zhang W, Schwartz J, Just A, Colicino E, Vokonas P, Zhao L, Lv J, Baccarelli A, Hou L & Liu L (2016). “Estimating and Testing High-dimensional Mediation Effects in Epigenetic Studies.” Bioinformatics, 32(20), 3150-3154. (doi.org/10.1093/bioinformatics/btw351). # .. [2] Huang YT & Pan WC (2016). “Hypothesis test of mediation effect in causal mediation model with high‐dimensional continuous mediators.” Biometrics, 72(2), 402-413. (doi.org/10.1111/biom.12421) # .. [3] Van Kesteren, E. J., & Oberski, D. L. (2019). “Exploratory mediation analysis with many potential mediators.” Structural Equation Modeling: A Multidisciplinary Journal, 26(5), 710-723. (doi.org/10.1080/10705511.2019.1588124) # # %% # sphinx_gallery_thumbnail_path = 'Tutorial/figure/dowhy.png'
15,683
50.762376
320
py
abess
abess-master/docs/Tutorial/5-scikit-learn-connection/plot_6_imbalanced_learn.py
""" =========================== Work with imbalanced-learn =========================== ``Imbalanced-learn`` is an open source, MIT-licensed library relying on scikit-learn and provides tools when dealing with classification with imbalanced classes. In this tutorial, we will show how to combine ``abess.linear.LogisticRegression`` and ``imbalanced-learn`` to handle a imbalanced binary classification task. """ #%% import warnings warnings.filterwarnings('ignore') import numpy as np from abess.linear import LogisticRegression from abess.datasets import make_glm_data from sklearn.model_selection import train_test_split from sklearn.metrics import balanced_accuracy_score from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN from imblearn.under_sampling import RandomUnderSampler, EditedNearestNeighbours #%% # Synthetic data # --------------- #%% # Generate imbalanced dataset (X, y). Here, we use ``make_glm_data`` to generate a balanced # binary dataset ``data`` and then drop 90% of positive samples. Thus, the imbalance ratio of # our example is around 10:1. 
n, p, k = 5000, 2000, 10 random_state = 12345 np.random.seed(random_state) data = make_glm_data(n=n, p=p, k=k, family='binomial') idx0 = np.where(data.y == 0)[0] # index of negative sample idx1 = np.where(data.y == 1)[0] # index of positive sample idx = np.array(list(set(idx0).union(set(idx1[:int(n/20)])))) X, y = data.x[idx], data.y[idx] print('Generated dataset has {} positive samples and {} negative samples.'.format(np.sum(y==1), np.sum(y==0))) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state) print('Train size: {}, Test size: {}.'.format(len(y_train), len(y_test))) #%% # Base estimator # --------------- model = LogisticRegression(support_size=k) model.fit(X_train, y_train) y_pred = model.predict(X_test) print('Balanced accuracy score: ', balanced_accuracy_score(y_test, y_pred).round(3)) #%% # Over-sampling # -------------- #%% # RandomOverSampler ros = RandomOverSampler() X_train_resampled, y_train_resampled = ros.fit_resample(X_train, y_train) model = LogisticRegression(support_size=k) model.fit(X_train_resampled, y_train_resampled) y_pred = model.predict(X_test) print('Resampled size: ', len(y_train_resampled)) print('Balanced accuracy score: ', balanced_accuracy_score(y_test, y_pred).round(3)) #%% # SMOTE X_train_resampled, y_train_resampled = SMOTE().fit_resample(X_train, y_train) model = LogisticRegression(support_size=k) model.fit(X_train_resampled, y_train_resampled) y_pred = model.predict(X_test) print('Resampled size: ', len(y_train_resampled)) print('Balanced accuracy score: ', balanced_accuracy_score(y_test, y_pred).round(3)) #%% # ADASYN X_train_resampled, y_train_resampled = ADASYN().fit_resample(X_train, y_train) model = LogisticRegression(support_size=k) model.fit(X_train_resampled, y_train_resampled) y_pred = model.predict(X_test) print('Resampled size: ', len(y_train_resampled)) print('Balanced accuracy score: ', balanced_accuracy_score(y_test, y_pred).round(3)) #%% # Under-sampling # ---------------- #%% # 
RandomUnderSampler rus = RandomUnderSampler() X_train_resampled, y_train_resampled = rus.fit_resample(X_train, y_train) model = LogisticRegression(support_size=k) model.fit(X_train_resampled, y_train_resampled) y_pred = model.predict(X_test) print('Resampled size: ', len(y_train_resampled)) print('Balanced accuracy score: ', balanced_accuracy_score(y_test, y_pred).round(3)) #%% # EditedNearestNeighbours enn = EditedNearestNeighbours(kind_sel='all') X_train_resampled, y_train_resampled = enn.fit_resample(X_train, y_train) model = LogisticRegression(support_size=k) model.fit(X_train_resampled, y_train_resampled) y_pred = model.predict(X_test) print('Resampled size: ', len(y_train_resampled)) print('Balanced accuracy score: ', balanced_accuracy_score(y_test, y_pred).round(3)) #%% # Pipeline # --------- #%% # In the following, we show how to construct a pipeline. # Note that pipeline implemented by sklearn requires that all intermediate # estimators must be transformers. # However, resamplers in imblearn are not transformers. # Instead, we explicitly use pipeline implemented by imblearn here. from imblearn.pipeline import Pipeline as imbPipeline resamplers = { 'RandomOverSampler': RandomOverSampler, 'SMOTE': SMOTE, 'ADASYN': ADASYN, 'RandomUnderSampler': RandomUnderSampler, 'EditedNearestNeighbours': EditedNearestNeighbours } for name in resamplers.keys(): resampler = resamplers[name]() estimators = [('resampler', resampler), ('clf', LogisticRegression(support_size=k))] pipe = imbPipeline(estimators) pipe.fit(X_train, y_train) y_pred = pipe.predict(X_test) print('{}: {}'.format(name, balanced_accuracy_score(y_test, y_pred).round(3)) ) # %% # sphinx_gallery_thumbnail_path = 'Tutorial/figure/imbalanced-learn.png'
4,974
32.166667
110
py
abess
abess-master/docs/real-data/README.md
## Introduction

We compare `abess` and other well-known algorithms under linear regression and logistic regression models on several real-world datasets. The comparison is conducted in both Python and R environments. In what follows, we depict step-by-step instructions for properly using scripts in subdirectories. Note that, at present, users should manually download the datasets from corresponding websites.

## Prerequisite

### Python (version 3.9.1):

- abess (0.4.5)
- celer (0.6.1)
- scikit-learn (1.0.2)
- pandas
- numpy

### R (version 3.6.3)

- abess (0.4.5)
- elasticnet (1.3.0)

## Step-by-step instruction

### superconductivity dataset

1. From https://archive.ics.uci.edu/ml/machine-learning-databases/00464/, download the `superconduct.zip` file into the `superconductivity` directory. Extract the `train.csv` from `superconduct.zip`, and put the `csv` file into the `superconductivity` directory.

2. Run the `superconductivity.py` script:

```bash
python superconductivity.py
```

### cancer dataset

1. Download `chin.RData` (https://github.com/ramhiser/datamicroarray/blob/master/data/chin.RData) into the `cancer` directory.

2. Run the `preprocess.R` script to produce files `chin_x.txt` and `chin_y.txt`:

```bash
Rscript preprocess.R
```

3. Run the `chin.py` script:

```bash
python chin.py
```

### musk dataset

1. Download `clean1.data.Z` and `clean2.data.Z` from https://archive.ics.uci.edu/ml/machine-learning-databases/musk, and extract `clean1.data` and `clean2.data` from the two `.Z` files, then put them into the `musk` directory.

2. Run the `musk.py` script:

```bash
python musk.py
```

### genetic dataset

1. Download `christensen.RData` from https://github.com/ramhiser/datamicroarray/blob/master/data/christensen.RData, and put it into the `genetic` directory.

2. Run the `christensen.R` script:

```bash
Rscript christensen.R
```
1,876
30.283333
299
md
abess
abess-master/docs/real-data/cancer/chin.py
# %% import numpy as np import pandas as pd from time import time from abess.linear import LogisticRegression from sklearn.metrics import roc_auc_score from sklearn.linear_model import LogisticRegressionCV from celer import LogisticRegression as celerLogisticRegressionCV from sklearn.model_selection import train_test_split, GridSearchCV import os os.chdir(os.path.dirname(os.path.abspath(__file__))) # %% read data X = pd.read_csv('chin_x.txt', header=0) X = X.to_numpy() y = pd.read_csv('chin_y.txt', header=0) y = np.array(y, dtype = float) y = np.reshape(y, -1) print("sample size: {0}, dimension: {1}".format(X.shape[0], X.shape[1])) # %% evaluation def metrics(coef, pred, real): auc = roc_auc_score(real, pred) nnz = len(np.nonzero(coef)[0]) return np.array([auc, nnz]) M = 20 model_name = "Logistic" method = [ "lasso", "celer", "abess", ] res_output = True data_output = False verbose = True # AUC, NNZ, time met = np.zeros((len(method), M, 3)) res = np.zeros((len(method), 6)) # %% Test print('===== Testing '+ model_name + ' =====') for m in range(M): ind = -1 print(" --> Replication: " + str(m+1)) trainx, testx, trainy, testy = train_test_split(X, y, test_size=0.1, random_state=m) # method 1: # alphas, t1, t2, t3 = celer_path(trainx, 2 * trainy - 1, pb="logreg") # method 2 (https://mathurinm.github.io/celer/auto_examples/plot_finance_path.html#sphx-glr-auto-examples-plot-finance-path-py): alpha_max = np.max(np.abs(trainx.T.dot(trainy))) / trainx.shape[0] n_alphas = 100 alphas = alpha_max * np.geomspace(1, 0.001, n_alphas) ## lasso if "lasso" in method: ind += 1 t_start = time() model = LogisticRegressionCV(Cs=alphas, penalty="l1", solver = "liblinear", cv=5, n_jobs=5, random_state=0) fit = model.fit(trainx, trainy) t_end = time() best_lasso_C = fit.C_[0] met[ind, m, 0:2] = metrics(fit.coef_, fit.predict_proba(testx)[:, 1].flatten(), testy) met[ind, m, 2] = t_end - t_start if verbose: print(" --> SKL time: " + str(t_end - t_start)) print(" --> SKL AUC : " + 
str(met[ind, m, 0])) print(" --> SKL NNZ : " + str(met[ind, m, 1])) ## celer if "celer" in method: ind += 1 tune_celer = False if tune_celer: parameters = {'C': alphas} t_start = time() model = celerLogisticRegressionCV() model = GridSearchCV(model, parameters, n_jobs=-1, cv=5) fit = model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(fit.best_estimator_.coef_, fit.predict_proba(testx)[:, 1].flatten(), testy) else: t_start = time() model = celerLogisticRegressionCV(C=best_lasso_C) fit = model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(fit.coef_, fit.predict_proba(testx)[:, 1].flatten(), testy) met[ind, m, 2] = t_end - t_start if verbose: print(" --> Celer time: " + str(t_end - t_start)) print(" --> Celer AUC : " + str(met[ind, m, 0])) print(" --> Celer NNZ : " + str(met[ind, m, 1])) ## abess if "abess" in method: ind += 1 t_start = time() model = LogisticRegression(cv=5, support_size = range(100), thread=5, approximate_Newton = True, primary_model_fit_epsilon=1e-6) model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(model.coef_, model.predict_proba(testx)[:, 1].flatten(), testy) met[ind, m, 2] = t_end - t_start if verbose: print(" --> ABESS time: " + str(t_end - t_start)) print(" --> ABESS AUC : " + str(met[ind, m, 0])) print(" --> ABESS NNZ : " + str(met[ind, m, 1])) for ind in range(0, len(method)): m = met[ind].mean(axis = 0) se = met[ind].std(axis = 0) / np.sqrt(M - 1) res[ind] = np.hstack((m, se)) res = np.around(res, decimals=2) print("===== Results " + model_name + " =====") print("Method: \n", method) print("Metrics (AUC, NNZ, Runtime): \n", res[:, 0:3]) file_name = "chin" if (res_output): np.save("{}_{}_res.npy".format(model_name, file_name), res) print("Result saved.")
4,440
30.496454
132
py
abess
abess-master/docs/real-data/musk/musk.py
# %% import numpy as np import pandas as pd from time import time from abess.linear import LogisticRegression from sklearn.metrics import roc_auc_score from sklearn.linear_model import LogisticRegressionCV from celer import LogisticRegression as celerLogisticRegressionCV from sklearn.model_selection import train_test_split, GridSearchCV import os os.chdir(os.path.dirname(os.path.abspath(__file__))) # %% read data data1 = pd.read_csv("clean1.data", header=None) data1 = data1.drop(data1.columns[[0, 1]], axis=1) data1 = data1.to_numpy() data2 = pd.read_csv("clean2.data", header=None) data2 = data2.drop(data2.columns[[0, 1]], axis=1) data2 = data2.to_numpy() data = np.vstack([data1, data2]) X = data[:, range(data.shape[1]-1)] y = data[:, -1] y = np.array(y, dtype=float) y = np.reshape(y, -1) print("sample size: {0}, dimension: {1}".format(X.shape[0], X.shape[1])) print(y.shape) # %% evaluation def metrics(coef, pred, real): auc = roc_auc_score(real, pred) nnz = len(np.nonzero(coef)[0]) return np.array([auc, nnz]) M = 20 model_name = "Logistic" method = [ "lasso", "celer", "abess", ] res_output = True data_output = False verbose = True # AUC, NNZ, time met = np.zeros((len(method), M, 3)) res = np.zeros((len(method), 6)) # Test print('===== Testing '+ model_name + ' =====') for m in range(M): ind = -1 print(" --> Replication: " + str(m+1)) trainx, testx, trainy, testy = train_test_split(X, y, test_size = 0.1, random_state = m) # method 1: # alphas, t1, t2, t3 = celer_path(trainx, 2 * trainy - 1, pb="logreg") # method 2 (https://mathurinm.github.io/celer/auto_examples/plot_finance_path.html#sphx-glr-auto-examples-plot-finance-path-py): alpha_max = np.max(np.abs(trainx.T.dot(trainy))) / trainx.shape[0] n_alphas = X.shape[1] alphas = alpha_max * np.geomspace(1, 0.001, n_alphas) ## lasso if "lasso" in method: ind += 1 t_start = time() # set max_iter=5000 to avoid ConvergenceWarning messages model = LogisticRegressionCV(Cs=alphas, penalty="l1", solver="saga", cv=5, n_jobs=5, 
max_iter=5000, random_state=0) fit = model.fit(trainx, trainy) t_end = time() best_lasso_C = fit.C_[0] met[ind, m, 0:2] = metrics(fit.coef_, fit.predict_proba(testx)[:, 1].flatten(), testy) met[ind, m, 2] = t_end - t_start if verbose: print(" --> SKL time: " + str(t_end - t_start)) print(" --> SKL AUC: {0}".format(met[ind, m, 0])) print(" --> SKL NNZ: {0}".format(met[ind, m, 1])) ## celer if "celer" in method: ind += 1 parameters = {'C': alphas} tune_celer = False if tune_celer: parameters = {'C': alphas} t_start = time() model = celerLogisticRegressionCV() model = GridSearchCV(model, parameters, n_jobs=-1, cv=5) fit = model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(fit.best_estimator_.coef_, fit.predict_proba(testx)[:, 1].flatten(), testy) else: t_start = time() # ConvergenceWarning frequently occurs, so increase `tol` model = celerLogisticRegressionCV(C=best_lasso_C, tol=2e-1) fit = model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(fit.coef_, fit.predict_proba(testx)[:, 1].flatten(), testy) met[ind, m, 2] = t_end - t_start if verbose: print(" --> Celer time: " + str(t_end - t_start)) print(" --> Celer AUC: {0}".format(met[ind, m, 0])) print(" --> Celer NNZ: {0}".format(met[ind, m, 1])) ## abess if "abess" in method: ind += 1 # max_supp = np.min([100, trainx.shape[1]]) max_supp = trainx.shape[1] t_start = time() # model = abessLogistic(is_cv = True, path_type = "pgs", s_min = 0, s_max = 99, thread = 0) model = LogisticRegression(cv=5, support_size=range(max_supp), thread=5, approximate_Newton=True, primary_model_fit_epsilon=1e-6) model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(model.coef_, model.predict_proba(testx)[:, 1].flatten(), testy) met[ind, m, 2] = t_end - t_start if verbose: print(" --> ABESS time: " + str(t_end - t_start)) print(" --> ABESS AUC: {0}".format(met[ind, m, 0])) print(" --> ABESS NNZ: {0}".format(met[ind, m, 1])) for ind in range(0, len(method)): m = met[ind].mean(axis = 0) se = 
met[ind].std(axis = 0) / np.sqrt(M - 1) res[ind] = np.hstack((m, se)) res = np.around(res, decimals=2) print("===== Results " + model_name + " =====") print("Method: \n", method) print("Metrics (AUC, NNZ, Runtime): \n", res[:, 0:3]) file_name = "musk" if (res_output): np.save("{}_{}_res.npy".format(model_name, file_name), res) print("Result saved.")
5,047
33.108108
132
py
abess
abess-master/docs/real-data/superconductivity/superconduct.py
# %% import numpy as np from time import time from abess.linear import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.linear_model import LassoCV, OrthogonalMatchingPursuitCV from sklearn.preprocessing import PolynomialFeatures, StandardScaler from celer import LassoCV as celerLassoCV from sklearn.model_selection import train_test_split import pandas as pd import os os.chdir(os.path.dirname(os.path.abspath(__file__))) # %% ## preprocess superconduct data: create a high-dimensional data data = pd.read_csv("train.csv", header=0) data = data.loc[data["number_of_elements"] == 3, :] data = data.drop(columns=['number_of_elements']) y = data.loc[:, 'critical_temp'] X = data.drop(columns=['critical_temp']) # %% feature = PolynomialFeatures(include_bias=False, degree=3, interaction_only=True) X = feature.fit_transform(X) y = np.reshape(y, -1) print("sample size: {}, dimension: {}".format(X.shape[0], X.shape[1])) # %% def metrics(coef, pred, real): auc = mean_squared_error(real, pred) nnz = len(np.nonzero(coef)[0]) return np.array([auc, nnz]) M = 20 model_name = "Linear" method = [ "lasso", "celer", # "omp", # uncomment this line because of memory leak "abess", ] res_output = True data_output = False verbose = True # MSE, NNZ, time met = np.zeros((len(method), M, 3)) res = np.zeros((len(method), 6)) # Test print('===== Testing '+ model_name + ' =====') for m in range(M): ind = -1 print(" --> Replication: " + str(m+1)) trainx, testx, trainy, testy = train_test_split(X, y, test_size=0.1, random_state=m) if "lasso" in method: ind += 1 # transform via StandardScaler and increase tol because of ConvergenceWarning scaler = StandardScaler() scaler.fit(trainx) lasso_trainx = scaler.transform(trainx) lasso_testx = scaler.transform(testx) t_start = time() model = LassoCV(cv=5, n_jobs=5, random_state=0, tol=2e-2) fit = model.fit(lasso_trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(fit.coef_, fit.predict(lasso_testx), testy) met[ind, m, 2] = 
t_end - t_start print(" --> SKL time: " + str(t_end - t_start)) print(" --> SKL err : " + str(met[ind, m, 0])) print(" --> SKL NNZ : " + str(met[ind, m, 1])) if "celer" in method: ind += 1 t_start = time() model = celerLassoCV(cv=5, n_jobs=5) fit = model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(fit.coef_, fit.predict(testx), testy) met[ind, m, 2] = t_end - t_start print(" --> CELER time: " + str(t_end - t_start)) print(" --> CELER err : " + str(met[ind, m, 0])) print(" --> CELER NNZ : " + str(met[ind, m, 1])) ## omp if "omp" in method: ind += 1 t_start = time() model = OrthogonalMatchingPursuitCV(cv=5, n_jobs=5, max_iter=100) fit = model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(fit.coef_, fit.predict(testx), testy) met[ind, m, 2] = t_end - t_start print(" --> OMP time: " + str(t_end - t_start)) print(" --> OMP err : " + str(met[ind, m, 0])) print(" --> OMP NNZ : " + str(met[ind, m, 1])) ## abess if "abess" in method: ind += 1 max_supp = np.min([100, trainx.shape[1]]) t_start = time() model = LinearRegression(cv=5, support_size=range(max_supp), thread=5) model.fit(trainx, trainy) t_end = time() met[ind, m, 0:2] = metrics(model.coef_, model.predict(testx), testy) met[ind, m, 2] = t_end - t_start print(" --> ABESS time: " + str(t_end - t_start)) print(" --> ABESS err : " + str(met[ind, m, 0])) print(" --> ABESS NNZ : " + str(met[ind, m, 1])) for ind in range(0, len(method)): m = met[ind].mean(axis = 0) se = met[ind].std(axis = 0) / np.sqrt(M - 1) res[ind] = np.hstack((m, se)) res = np.around(res, decimals=2) print("===== Results " + model_name + " =====") print("Method: \n", method) print("Metrics (MSE, NNZ, Runtime): \n", res[:, 0:3]) if (res_output): np.save(model_name + "_res.npy", res) print("Result saved.")
4,311
30.940741
88
py
abess
abess-master/docs/simulation/README.md
## Introduction We compare `abess` and other well-known algorithms under linear regression and logistic regression model. The comparison is conducted on both Python and R environments. ## Prerequisite ### Python (version 3.9.1): - abess (0.4.5) - scikit-learn (1.0.2) - numpy ### R (version 3.6.3) - abess (0.4.5) - glmnet (4.1-1) - ncvreg (3.13.0) - L0Learn (2.0.3) - ggpubr - mccr - pROC - ggplot2 - tidyr ## Python directory ### Files - `run_benchmark_linear.py`: conducts simulation on sparse linear model - `run_benchmark_logistic.py`: conducts simulation on sparse logistic regression - `plot_results_figure.py`: visualizes the simulation results outputted by `run_benchmark_linear.py` and `run_benchmark_logistic.py` - `python plot_important_search.py`: conducts simulation on the with/without important-searching technique ### Instructions - To reproduce the simulation results in demonstrated in: https://abess.readthedocs.io/en/latest/auto_gallery/1-glm/plot_a1_power_of_abess.html, conduct: ``` python plot_results_figure.py ``` - Run `python plot_important_search.py` to reproduce the lastest figure in: https://abess.readthedocs.io/en/latest/auto_gallery/4-computation-tips/plot_large_dimension.html#experimental-evidences-important-searching. ## R directory ### Files - `linear_source.R` and `logistic_source.R`: include core code for simulation - `run_benchmark.R`: conducts simulation on sparse linear model and sparse logistic regression - `plot_results_figure.R`: visualizes the simulation results outputted by `run_benchmark.R` ### Instruction To reproduce the Figures in [this article](https://abess-team.github.io/abess/articles/v11-power-of-abess.html#results), run: ```bash Rscript run_benchmark.R Rscript plot_results_figure.R ```
1,773
29.586207
216
md
abess
abess-master/docs/simulation/Python/plot_important_search.py
from time import time import numpy as np from sklearn.metrics import roc_auc_score import matplotlib.pyplot as plt from abess.linear import LogisticRegression from abess.datasets import make_glm_data np.random.seed(0) n = 500 p = 2000 k = 20 rho = 0.1 M = 50 search_path = [32, 64, 128, 256, 512, 1024, 2048] met_save = True res_save = True figure_save = True met = np.zeros((len(search_path), M, 2)) res = np.zeros((len(search_path), 5)) for m in range(M): train = make_glm_data(n=n, p=p, k=k, family='binomial') test = make_glm_data(n=n, p=p, k=k, family='binomial', coef_=train.coef_) print("==> Iter : ", m) for i, imp in enumerate(search_path): ts = time() model = LogisticRegression( support_size=range(100), important_search=imp) model.fit(train.x, train.y) te = time() met[i, m, 0] = roc_auc_score(test.y, model.predict(test.x)) met[i, m, 1] = te - ts for i, imp in enumerate(search_path): res[i, 0] = imp m = met[i].mean(axis=0) se = met[i].std(axis=0) / np.sqrt(M - 1) res[i, 1:5] = np.hstack((m, se)) if met_save: np.save('met.npy', met) if res_save: np.save('res.npy', res) if figure_save: res = np.load("res.npy") # print(res) plt.figure(figsize=(20, 6)) plt.subplot(121) plt.errorbar(res[:, 0], res[:, 1], yerr=res[:, 3] * 2, capsize=3) plt.xticks(res[:, 0], [str(i) for i in res[:, 0]]) plt.ylim(0.9, 1) plt.ylabel('AUC') plt.xlabel('log2(important_search)') # plt.savefig('./auc.png') plt.subplot(122) plt.errorbar(res[:, 0], res[:, 2], yerr=res[:, 4] * 2, capsize=3) plt.xticks(res[:, 0], [str(i) for i in res[:, 0]]) plt.title('Time(/s)') plt.xlabel('log2(important_search)') # plt.savefig('./time.png') plt.savefig('./impsearch.png') print('Figure saved.')
1,874
24.337838
77
py
abess
abess-master/docs/simulation/Python/plot_results_figure.py
import os import sys import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as mpatches # %% run test os.chdir(os.path.dirname(os.path.abspath(__file__))) files = [ 'Lm0.1_res.npy', 'Lm0.7_res.npy', 'Logistic0.1_res.npy', 'Logistic0.7_res.npy', 'Lm0.1_data.npy', 'Lm0.7_data.npy', 'Logistic0.1_data.npy', 'Logistic0.7_data.npy' ] benchmarks = [ 'run_benchmark_linear.py 0.1', 'run_benchmark_linear.py 0.7', 'run_benchmark_logistic.py 0.1', 'run_benchmark_logistic.py 0.7' ] python_path = sys.executable for i in range(4): if not (os.path.exists(files[i]) and os.path.exists(files[i]) + 4): os.system(python_path + ' ' + benchmarks[i]) # %% load results lm1_res = np.load('Lm0.1_res.npy') lm7_res = np.load('Lm0.7_res.npy') logi1_res = np.load('Logistic0.1_res.npy') logi7_res = np.load('Logistic0.7_res.npy') lm1_data = np.load('Lm0.1_data.npy') lm7_data = np.load('Lm0.7_data.npy') logi1_data = np.load('Logistic0.1_data.npy') logi7_data = np.load('Logistic0.7_data.npy') # %% plot performance results plt.figure(1,figsize=(18, 27)) # lm color = ['#00AF91', '#FFCC29', '#5089C6'] c1 = mpatches.Patch(color=color[0], label='Lasso') c2 = mpatches.Patch(color=color[1], label='OMP') c3 = mpatches.Patch(color=color[2], label='ABESS') plt.subplot(321) for i in range(lm1_data.shape[0]): plt.boxplot(x=[lm1_data[i, lm1_data[i, :, 0] < 100, 0], lm7_data[i, lm7_data[i, :, 0] < 100, 0]], patch_artist=True, labels=['', ''], positions=[i + 1, i + 5], widths=0.7, boxprops=dict(facecolor=color[i])) plt.xlabel('low corr high corr') plt.title('Linear - Predict Error') plt.subplot(323) for i in range(lm1_data.shape[0]): plt.boxplot(x=[lm1_data[i, :, 1], lm7_data[i, :, 1]], patch_artist=True, labels=['', ''], positions=[i + 1, i + 5], widths=0.7, boxprops=dict(facecolor=color[i])) plt.xlabel('low corr high corr') plt.title('Linear - Coefficient error') plt.subplot(325) for i in range(lm1_data.shape[0]): plt.boxplot(x=[lm1_data[i, :, 3], lm7_data[i, :, 3]], patch_artist=True, 
labels=['', ''], positions=[i + 1, i + 5], widths=0.7, boxprops=dict(facecolor=color[i])) plt.xlabel('low corr high corr') plt.title('Linear - FPR') plt.legend(handles=[c1, c2, c3]) # logi color = ['#00AF91', '#5089C6'] plt.subplot(322) for i in range(logi1_data.shape[0]): plt.boxplot(x=[logi1_data[i, :, 0], logi7_data[i, :, 0]], patch_artist=True, labels=['', ''], positions=[i + 1, i + 4], widths=0.7, boxprops=dict(facecolor=color[i])) plt.xlabel('low corr high corr') plt.title('Logistic - AUC') plt.subplot(324) for i in range(logi1_data.shape[0]): plt.boxplot(x=[logi1_data[i, :, 1], logi7_data[i, :, 1]], patch_artist=True, labels=['', ''], positions=[i + 1, i + 4], widths=0.7, boxprops=dict(facecolor=color[i]), medianprops=dict(color=color[i])) plt.xlabel('low corr high corr') plt.title('Logistic - Coefficient error') plt.subplot(326) for i in range(logi1_data.shape[0]): plt.boxplot(x=[logi1_data[i, :, 3], logi7_data[i, :, 3]], patch_artist=True, labels=['', ''], positions=[i + 1, i + 4], widths=0.7, boxprops=dict(facecolor=color[i])) plt.xlabel('low corr high corr') plt.title('Logistic - FPR') plt.legend(handles=[c1, c3]) plt.savefig('perform.png') print('Perfromance figure saved.') # %% plot timing results plt.figure(2, figsize=(14, 6)) # lm_time plt.subplot(121) color = ['#00AF91', '#FFCC29', '#5089C6'] c1 = mpatches.Patch(color=color[0], label='Lasso') c2 = mpatches.Patch(color=color[1], label='OMP') c3 = mpatches.Patch(color=color[2], label='ABESS') temp = np.vstack((lm1_res[:, [5, 11]], lm7_res[:, [5, 11]])) plt.bar(x=[1, 2, 3, 5, 6, 7], height=temp[:, 0], yerr=temp[:, 1] * 2, capsize=10, tick_label='', color=color) plt.xlabel('low corr high corr') plt.title('Linear') plt.ylabel('time(s)') plt.ylim((0, 8)) plt.legend(handles=[c1, c2, c3]) # plt.savefig('./lm_time.png') # logi_time plt.subplot(122) color = ['#00AF91', '#5089C6'] temp = np.vstack((logi1_res[:, [5, 11]], logi7_res[:, [5, 11]])) lm_time = plt.bar(x=[1, 2, 4, 5], height=temp[:, 0], yerr=temp[:, 
1] * 2, capsize=10, tick_label='', color=color[0:2]) plt.title('Logistic') plt.ylabel('time(s)') plt.ylim((0, 8)) plt.legend(handles=[c1, c3]) plt.xlabel('low corr high corr') # plt.savefig('./logi_time.png') plt.savefig('timings.png') print('Timing figure saved.')
4,966
32.113333
80
py
abess
abess-master/docs/simulation/Python/run_benchmark_linear.py
import sys import warnings from time import time import numpy as np from sklearn.metrics import matthews_corrcoef from sklearn.linear_model import LassoCV from sklearn.linear_model import OrthogonalMatchingPursuitCV # from spams import fistaFlat # from sklearn.model_selection import GridSearchCV # from glmnet import ElasticNet # import statsmodels.api as sm # from l0bnb import fit_path from abess.linear import LinearRegression from abess.datasets import make_glm_data warnings.filterwarnings("ignore", category=FutureWarning) def metrics(coef, pred, test): pred_err = np.linalg.norm((pred - test.y)) coef_err = np.linalg.norm(coef - test.coef_) p = abs(coef) > 1e-5 r = abs(test.coef_) > 1e-5 tpr = sum(r & p) / sum(r) fpr = sum(~r & p) / sum(~r) mcc = matthews_corrcoef(r, p) return np.array([pred_err, coef_err, tpr, fpr, mcc]) n = 500 p = 8000 M = 20 rho = float(sys.argv[1]) model_name = "Lm" method = [ "lasso", "omp", # "statsmodels", # "glmnet", # "l0bnb", # "spams", "abess", ] res_output = True data_output = True # pred_err, coef_err, tpr, fpr, mcc, time met = np.zeros((len(method), M, 6)) res = np.zeros((len(method), 12)) print('===== Testing ' + model_name + " - " + str(rho) + ' =====') for m in range(M): ind = -1 if m % 10 == 0: print(" --> iter: " + str(m)) # data gene np.random.seed(m) train = make_glm_data(n=n, p=p, k=10, family="gaussian", rho=rho) np.random.seed(m + M) test = make_glm_data( n=n, p=p, k=10, family="gaussian", rho=rho, coef_=train.coef_) # lasso if "lasso" in method: ind += 1 t_start = time() model = LassoCV(cv=5, n_jobs=5) fit = model.fit(train.x, train.y) t_end = time() met[ind, m, 0:5] = metrics(fit.coef_, fit.predict(test.x), test) met[ind, m, 5] = t_end - t_start # print(" --> SKL time: " + str(t_end - t_start)) # omp if "omp" in method: ind += 1 t_start = time() model = OrthogonalMatchingPursuitCV(cv=5, n_jobs=5, max_iter=100) fit = model.fit(train.x, train.y) t_end = time() met[ind, m, 0:5] = metrics(fit.coef_, fit.predict(test.x), test) 
met[ind, m, 5] = t_end - t_start # print(" --> OMP time: " + str(t_end - t_start)) # statsmodels if "statsmodels" in method: ind += 1 t_start = time() model = sm.OLS(train.y, train.x) fit = model.fit_regularized(alpha=1, L1_wt=1) t_end = time() met[ind, m, 0:5] = metrics(fit.params, fit.predict(test.x), test) met[ind, m, 5] = t_end - t_start # print(" --> STATS time: " + str(t_end - t_start)) # glmnet if "glmnet" in method: ind += 1 t_start = time() model = ElasticNet(n_jobs=8) fit = model.fit(train.x, train.y) t_end = time() met[ind, m, 0:5] = metrics(fit.coef_, fit.predict(test.x), test) met[ind, m, 5] = t_end - t_start # print(" --> GLMNET time: " + str(t_end - t_start)) # l0bnb if "l0bnb" in method: ind += 1 t_start = time() fit = fit_path(train.x, train.y, max_nonzeros=99) t_end = time() pred = np.dot(test.x, fit[4]['B']) + fit[4]['B0'] met[ind, m, 0:5] = metrics(fit[4]['B'], pred, test) met[ind, m, 5] = t_end - t_start # print(" --> L0BNB time: " + str(t_end - t_start)) # spams if "spams" in method: ind += 1 W0 = np.zeros((p + 1, 1)) X0 = np.asfortranarray(np.hstack(((train.x), np.ones((n, 1))))) Y0 = np.asfortranarray(train.y.reshape(len(train.y), 1)) t_start = time() fit = fistaFlat( Y0, X0, W0, regul='l0', loss='square', lambda1=1000, intercept=True) t_end = time() fit = fit.reshape(-1) pred = np.dot(test.x, fit[0: p]) + fit[p] met[ind, m, 0:5] = metrics(fit[0:p], pred, test) met[ind, m, 5] = t_end - t_start # print(" --> SPAMS time: " + str(t_end - t_start)) # abess if "abess" in method: ind += 1 t_start = time() model = LinearRegression(cv=5, support_size=range(100), thread=5) fit = model.fit(train.x, train.y) t_end = time() met[ind, m, 0:5] = metrics(fit.coef_, fit.predict(test.x), test) met[ind, m, 5] = t_end - t_start # print(" --> ABESS time: " + str(t_end - t_start)) for ind in range(0, len(method)): m = met[ind].mean(axis=0) se = met[ind].std(axis=0) / np.sqrt(M - 1) res[ind] = np.hstack((m, se)) print("===== Results " + model_name + " - " + 
str(rho) + " =====") print("Method: \n", method) print("Metrics: \n", res[:, 0:6]) print("Err: \n", res[:, 6:12]) if res_output: np.save(model_name + str(rho) + "_res.npy", res) print("Result saved.") if data_output: np.save(model_name + str(rho) + "_data.npy", met) print("Data saved.")
5,137
25.900524
73
py
abess
abess-master/docs/simulation/Python/run_benchmark_logistic.py
import sys from time import time import numpy as np from sklearn.metrics import matthews_corrcoef, roc_auc_score from sklearn.linear_model import LogisticRegressionCV # from glmnet import LogitNet # import statsmodels.api as sm from abess.linear import LogisticRegression from abess.datasets import make_glm_data def metrics(coef, pred, test): auc = roc_auc_score(test.y, pred) coef_err = np.linalg.norm(coef - test.coef_) p = abs(coef) > 1e-5 r = abs(test.coef_) > 1e-5 tpr = sum(r & p) / sum(r) fpr = sum(~r & p) / sum(~r) mcc = matthews_corrcoef(r, p) return np.array([auc, coef_err, tpr, fpr, mcc]) n = 500 p = 8000 M = 20 rho = float(sys.argv[1]) model_name = "Logistic" method = [ "lasso", # "statsmodels", # "glmnet", "abess", ] res_output = True data_output = True # auc, coef_err, tpr, fpr, mcc, time met = np.zeros((len(method), M, 6)) res = np.zeros((len(method), 12)) print('===== Testing ' + model_name + " - " + str(rho) + ' =====') for m in range(M): ind = -1 if m % 10 == 0: print(" --> iter: " + str(m)) # data gene np.random.seed(m) train = make_glm_data(n=n, p=p, k=10, family="binomial", rho=rho) np.random.seed(m + M) test = make_glm_data( n=n, p=p, k=10, family="binomial", rho=rho, coef_=train.coef_) # lasso if "lasso" in method: ind += 1 t_start = time() model = LogisticRegressionCV( penalty="l1", solver="liblinear", cv=5, n_jobs=5) fit = model.fit(train.x, train.y) t_end = time() met[ind, m, 0:5] = metrics( fit.coef_.flatten(), fit.predict(test.x), test) met[ind, m, 5] = t_end - t_start # print(" --> SKL time: " + str(t_end - t_start)) # statsmodels if "statsmodels" in method: ind += 1 t_start = time() model = sm.Logit(train.y, train.x) fit = model.fit_regularized(alpha=1, L1_wt=1) t_end = time() met[ind, m, 0:5] = metrics(fit.params, fit.predict(test.x), test) met[ind, m, 5] = t_end - t_start # print(" --> STATS time: " + str(t_end - t_start)) # glmnet if "glmnet" in method: ind += 1 t_start = time() model = LogitNet(n_jobs=8) fit = model.fit(train.x, 
train.y) t_end = time() met[ind, m, 0:5] = metrics( fit.coef_.flatten(), fit.predict(test.x), test) met[ind, m, 5] = t_end - t_start # print(" --> GLMNET time: " + str(t_end - t_start)) # abess if "abess" in method: ind += 1 t_start = time() model = LogisticRegression(cv=5, support_size=range(100), thread=5, approximate_Newton=True, primary_model_fit_epsilon=1e-6) fit = model.fit(train.x, train.y) t_end = time() met[ind, m, 0:5] = metrics(fit.coef_, fit.predict(test.x), test) met[ind, m, 5] = t_end - t_start # print(" --> ABESS time: " + str(t_end - t_start)) for ind in range(0, len(method)): m = met[ind].mean(axis=0) se = met[ind].std(axis=0) / np.sqrt(M - 1) res[ind] = np.hstack((m, se)) print("===== Results " + model_name + " - " + str(rho) + " =====") print("Method: \n", method) print("Metrics: \n", res[:, 0:6]) print("Err: \n", res[:, 6:12]) if res_output: np.save(model_name + str(rho) + "_res.npy", res) print("Result saved.") if data_output: np.save(model_name + str(rho) + "_data.npy", met) print("Data saved.")
3,621
25.82963
75
py
abess
abess-master/include/Spectra/DavidsonSymEigsSolver.h
// Copyright (C) 2020 Netherlands eScience Center <f.zapata@esciencecenter.nl> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H #define SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H #include <Eigen/Core> #include "JDSymEigsBase.h" #include "Util/SelectionRule.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// This class implement the DPR correction for the Davidson algorithms. /// The algorithms in the Davidson family only differ in how the correction /// vectors are computed and optionally in the initial orthogonal basis set. /// /// the DPR correction compute the new correction vector using the following expression: /// \f[ correction = -(\boldsymbol{D} - \rho \boldsymbol{I})^{-1} \boldsymbol{r} \f] /// where /// \f$D\f$ is the diagonal of the target matrix, \f$\rho\f$ the Ritz eigenvalue, /// \f$I\f$ the identity matrix and \f$r\f$ the residue vector. /// template <typename OpType> class DavidsonSymEigsSolver : public JDSymEigsBase<DavidsonSymEigsSolver<OpType>, OpType> { private: using Index = Eigen::Index; using Scalar = typename OpType::Scalar; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; Vector m_diagonal; public: DavidsonSymEigsSolver(OpType& op, Index nev) : JDSymEigsBase<DavidsonSymEigsSolver<OpType>, OpType>(op, nev) { m_diagonal.resize(this->m_matrix_operator.rows()); for (Index i = 0; i < op.rows(); i++) { m_diagonal(i) = op(i, i); } } /// Create initial search space based on the diagonal /// and the spectrum'target (highest or lowest) /// /// \param selection Spectrum section to target (e.g. lowest, etc.) 
/// \return Matrix with the initial orthonormal basis Matrix setup_initial_search_space(SortRule selection) const { std::vector<Eigen::Index> indices_sorted = argsort(selection, m_diagonal); Matrix initial_basis = Matrix::Zero(this->m_matrix_operator.rows(), this->m_initial_search_space_size); for (Index k = 0; k < this->m_initial_search_space_size; k++) { Index row = indices_sorted[k]; initial_basis(row, k) = 1.0; } return initial_basis; } /// Compute the corrections using the DPR method. /// /// \return New correction vectors. Matrix calculate_correction_vector() const { const Matrix& residues = this->m_ritz_pairs.residues(); const Vector& eigvals = this->m_ritz_pairs.ritz_values(); Matrix correction = Matrix::Zero(this->m_matrix_operator.rows(), this->m_correction_size); for (Index k = 0; k < this->m_correction_size; k++) { Vector tmp = eigvals(k) - m_diagonal.array(); correction.col(k) = residues.col(k).array() / tmp.array(); } return correction; } }; } // namespace Spectra #endif // SPECTRA_DAVIDSON_SYM_EIGS_SOLVER_H
3,169
33.835165
111
h
abess
abess-master/include/Spectra/GenEigsBase.h
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_GEN_EIGS_BASE_H #define SPECTRA_GEN_EIGS_BASE_H #include <Eigen/Core> #include <vector> // std::vector #include <cmath> // std::abs, std::pow, std::sqrt #include <algorithm> // std::min, std::copy #include <complex> // std::complex, std::conj, std::norm, std::abs #include <stdexcept> // std::invalid_argument #include "Util/Version.h" #include "Util/TypeTraits.h" #include "Util/SelectionRule.h" #include "Util/CompInfo.h" #include "Util/SimpleRandom.h" #include "MatOp/internal/ArnoldiOp.h" #include "LinAlg/UpperHessenbergQR.h" #include "LinAlg/DoubleShiftQR.h" #include "LinAlg/UpperHessenbergEigen.h" #include "LinAlg/Arnoldi.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// This is the base class for general eigen solvers, mainly for internal use. /// It is kept here to provide the documentation for member functions of concrete eigen solvers /// such as GenEigsSolver and GenEigsRealShiftSolver. 
/// template <typename OpType, typename BOpType> class GenEigsBase { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>; using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>; using MapMat = Eigen::Map<Matrix>; using MapVec = Eigen::Map<Vector>; using MapConstVec = Eigen::Map<const Vector>; using Complex = std::complex<Scalar>; using ComplexMatrix = Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic>; using ComplexVector = Eigen::Matrix<Complex, Eigen::Dynamic, 1>; using ArnoldiOpType = ArnoldiOp<Scalar, OpType, BOpType>; using ArnoldiFac = Arnoldi<Scalar, ArnoldiOpType>; protected: // clang-format off OpType& m_op; // object to conduct matrix operation, // e.g. matrix-vector product const Index m_n; // dimension of matrix A const Index m_nev; // number of eigenvalues requested const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method Index m_nmatop; // number of matrix operations called Index m_niter; // number of restarting iterations ArnoldiFac m_fac; // Arnoldi factorization ComplexVector m_ritz_val; // Ritz values ComplexMatrix m_ritz_vec; // Ritz vectors ComplexVector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates private: BoolArray m_ritz_conv; // indicator of the convergence of Ritz values CompInfo m_info; // status of the computation // clang-format on // Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part // Complex Ritz values have exact conjugate pairs // So we use exact tests here static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); } static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); } // Implicitly restarted Arnoldi factorization void restart(Index k, SortRule selection) { using std::norm; if (k >= m_ncv) 
return; DoubleShiftQR<Scalar> decomp_ds(m_ncv); UpperHessenbergQR<Scalar> decomp_hb(m_ncv); Matrix Q = Matrix::Identity(m_ncv, m_ncv); for (Index i = k; i < m_ncv; i++) { if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1])) { // H - mu * I = Q1 * R1 // H <- R1 * Q1 + mu * I = Q1' * H * Q1 // H - conj(mu) * I = Q2 * R2 // H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2 // // (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R const Scalar s = Scalar(2) * m_ritz_val[i].real(); const Scalar t = norm(m_ritz_val[i]); decomp_ds.compute(m_fac.matrix_H(), s, t); // Q -> Q * Qi decomp_ds.apply_YQ(Q); // H -> Q'HQ // Matrix Q = Matrix::Identity(m_ncv, m_ncv); // decomp_ds.apply_YQ(Q); // m_fac_H = Q.transpose() * m_fac_H * Q; m_fac.compress_H(decomp_ds); i++; } else { // QR decomposition of H - mu * I, mu is real decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real()); // Q -> Q * Qi decomp_hb.apply_YQ(Q); // H -> Q'HQ = RQ + mu * I m_fac.compress_H(decomp_hb); } } m_fac.compress_V(Q); m_fac.factorize_from(k, m_ncv, m_nmatop); retrieve_ritzpair(selection); } // Calculates the number of converged Ritz values Index num_converged(const Scalar& tol) { using std::pow; // The machine precision, ~= 1e-16 for the "double" type constexpr Scalar eps = TypeTraits<Scalar>::epsilon(); // std::pow() is not constexpr, so we do not declare eps23 to be constexpr // But most compilers should be able to compute eps23 at compile time const Scalar eps23 = pow(eps, Scalar(2) / 3); // thresh = tol * max(eps23, abs(theta)), theta for Ritz value Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(eps23); Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); // Converged "wanted" Ritz values m_ritz_conv = (resid < thresh); return m_ritz_conv.count(); } // Returns the adjusted nev for restarting Index nev_adjusted(Index nconv) { using std::abs; // A very small value, but 1.0 / near_0 does not overflow // ~= 1e-307 for the "double" type constexpr 
Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10); Index nev_new = m_nev; for (Index i = m_nev; i < m_ncv; i++) if (abs(m_ritz_est[i]) < near_0) nev_new++; // Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK nev_new += (std::min)(nconv, (m_ncv - nev_new) / 2); if (nev_new == 1 && m_ncv >= 6) nev_new = m_ncv / 2; else if (nev_new == 1 && m_ncv > 3) nev_new = 2; if (nev_new > m_ncv - 2) nev_new = m_ncv - 2; // Increase nev by one if ritz_val[nev - 1] and // ritz_val[nev] are conjugate pairs if (is_complex(m_ritz_val[nev_new - 1]) && is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new])) { nev_new++; } return nev_new; } // Retrieves and sorts Ritz values and Ritz vectors void retrieve_ritzpair(SortRule selection) { UpperHessenbergEigen<Scalar> decomp(m_fac.matrix_H()); const ComplexVector& evals = decomp.eigenvalues(); ComplexMatrix evecs = decomp.eigenvectors(); // Sort Ritz values and put the wanted ones at the beginning std::vector<Index> ind; switch (selection) { case SortRule::LargestMagn: { SortEigenvalue<Complex, SortRule::LargestMagn> sorting(evals.data(), m_ncv); sorting.swap(ind); break; } case SortRule::LargestReal: { SortEigenvalue<Complex, SortRule::LargestReal> sorting(evals.data(), m_ncv); sorting.swap(ind); break; } case SortRule::LargestImag: { SortEigenvalue<Complex, SortRule::LargestImag> sorting(evals.data(), m_ncv); sorting.swap(ind); break; } case SortRule::SmallestMagn: { SortEigenvalue<Complex, SortRule::SmallestMagn> sorting(evals.data(), m_ncv); sorting.swap(ind); break; } case SortRule::SmallestReal: { SortEigenvalue<Complex, SortRule::SmallestReal> sorting(evals.data(), m_ncv); sorting.swap(ind); break; } case SortRule::SmallestImag: { SortEigenvalue<Complex, SortRule::SmallestImag> sorting(evals.data(), m_ncv); sorting.swap(ind); break; } default: throw std::invalid_argument("unsupported selection rule"); } // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively for (Index i = 0; i < m_ncv; i++) { 
m_ritz_val[i] = evals[ind[i]]; m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); } for (Index i = 0; i < m_nev; i++) { m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); } } protected: // Sorts the first nev Ritz pairs in the specified order // This is used to return the final results virtual void sort_ritzpair(SortRule sort_rule) { std::vector<Index> ind; switch (sort_rule) { case SortRule::LargestMagn: { SortEigenvalue<Complex, SortRule::LargestMagn> sorting(m_ritz_val.data(), m_nev); sorting.swap(ind); break; } case SortRule::LargestReal: { SortEigenvalue<Complex, SortRule::LargestReal> sorting(m_ritz_val.data(), m_nev); sorting.swap(ind); break; } case SortRule::LargestImag: { SortEigenvalue<Complex, SortRule::LargestImag> sorting(m_ritz_val.data(), m_nev); sorting.swap(ind); break; } case SortRule::SmallestMagn: { SortEigenvalue<Complex, SortRule::SmallestMagn> sorting(m_ritz_val.data(), m_nev); sorting.swap(ind); break; } case SortRule::SmallestReal: { SortEigenvalue<Complex, SortRule::SmallestReal> sorting(m_ritz_val.data(), m_nev); sorting.swap(ind); break; } case SortRule::SmallestImag: { SortEigenvalue<Complex, SortRule::SmallestImag> sorting(m_ritz_val.data(), m_nev); sorting.swap(ind); break; } default: throw std::invalid_argument("unsupported sorting rule"); } ComplexVector new_ritz_val(m_ncv); ComplexMatrix new_ritz_vec(m_ncv, m_nev); BoolArray new_ritz_conv(m_nev); for (Index i = 0; i < m_nev; i++) { new_ritz_val[i] = m_ritz_val[ind[i]]; new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); new_ritz_conv[i] = m_ritz_conv[ind[i]]; } m_ritz_val.swap(new_ritz_val); m_ritz_vec.swap(new_ritz_vec); m_ritz_conv.swap(new_ritz_conv); } public: /// \cond GenEigsBase(OpType& op, const BOpType& Bop, Index nev, Index ncv) : m_op(op), m_n(m_op.rows()), m_nev(nev), m_ncv(ncv > m_n ? 
m_n : ncv), m_nmatop(0), m_niter(0), m_fac(ArnoldiOpType(op, Bop), m_ncv), m_info(CompInfo::NotComputed) { if (nev < 1 || nev > m_n - 2) throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix"); if (ncv < nev + 2 || ncv > m_n) throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix"); } /// /// Virtual destructor /// virtual ~GenEigsBase() {} /// \endcond /// /// Initializes the solver by providing an initial residual vector. /// /// \param init_resid Pointer to the initial residual vector. /// /// **Spectra** (and also **ARPACK**) uses an iterative algorithm /// to find eigenvalues. This function allows the user to provide the initial /// residual vector. /// void init(const Scalar* init_resid) { // Reset all matrices/vectors to zero m_ritz_val.resize(m_ncv); m_ritz_vec.resize(m_ncv, m_nev); m_ritz_est.resize(m_ncv); m_ritz_conv.resize(m_nev); m_ritz_val.setZero(); m_ritz_vec.setZero(); m_ritz_est.setZero(); m_ritz_conv.setZero(); m_nmatop = 0; m_niter = 0; // Initialize the Arnoldi factorization MapConstVec v0(init_resid, m_n); m_fac.init(v0, m_nmatop); } /// /// Initializes the solver by providing a random initial residual vector. /// /// This overloaded function generates a random initial residual vector /// (with a fixed random seed) for the algorithm. Elements in the vector /// follow independent Uniform(-0.5, 0.5) distribution. /// void init() { SimpleRandom<Scalar> rng(0); Vector init_resid = rng.random_vec(m_n); init(init_resid.data()); } /// /// Conducts the major computation procedure. /// /// \param selection An enumeration value indicating the selection rule of /// the requested eigenvalues, for example `SortRule::LargestMagn` /// to retrieve eigenvalues with the largest magnitude. /// The full list of enumeration values can be found in /// \ref Enumerations. /// \param maxit Maximum number of iterations allowed in the algorithm. 
/// \param tol Precision parameter for the calculated eigenvalues. /// \param sorting Rule to sort the eigenvalues and eigenvectors. /// Supported values are /// `SortRule::LargestMagn`, `SortRule::LargestReal`, /// `SortRule::LargestImag`, `SortRule::SmallestMagn`, /// `SortRule::SmallestReal` and `SortRule::SmallestImag`, /// for example `SortRule::LargestMagn` indicates that eigenvalues /// with largest magnitude come first. /// Note that this argument is only used to /// **sort** the final result, and the **selection** rule /// (e.g. selecting the largest or smallest eigenvalues in the /// full spectrum) is specified by the parameter `selection`. /// /// \return Number of converged eigenvalues. /// Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 1000, Scalar tol = 1e-10, SortRule sorting = SortRule::LargestMagn) { // The m-step Arnoldi factorization m_fac.factorize_from(1, m_ncv, m_nmatop); retrieve_ritzpair(selection); // Restarting Index i, nconv = 0, nev_adj; for (i = 0; i < maxit; i++) { nconv = num_converged(tol); if (nconv >= m_nev) break; nev_adj = nev_adjusted(nconv); restart(nev_adj, selection); } // Sorting results sort_ritzpair(sorting); m_niter += i + 1; m_info = (nconv >= m_nev) ? CompInfo::Successful : CompInfo::NotConverging; return (std::min)(m_nev, nconv); } /// /// Returns the status of the computation. /// The full list of enumeration values can be found in \ref Enumerations. /// CompInfo info() const { return m_info; } /// /// Returns the number of iterations used in the computation. /// Index num_iterations() const { return m_niter; } /// /// Returns the number of matrix operations used in the computation. /// Index num_operations() const { return m_nmatop; } /// /// Returns the converged eigenvalues. /// /// \return A complex-valued vector containing the eigenvalues. /// Returned vector type will be `Eigen::Vector<std::complex<Scalar>, ...>`, depending on /// the template parameter `Scalar` defined. 
/// ComplexVector eigenvalues() const { const Index nconv = m_ritz_conv.cast<Index>().sum(); ComplexVector res(nconv); if (!nconv) return res; Index j = 0; for (Index i = 0; i < m_nev; i++) { if (m_ritz_conv[i]) { res[j] = m_ritz_val[i]; j++; } } return res; } /// /// Returns the eigenvectors associated with the converged eigenvalues. /// /// \param nvec The number of eigenvectors to return. /// /// \return A complex-valued matrix containing the eigenvectors. /// Returned matrix type will be `Eigen::Matrix<std::complex<Scalar>, ...>`, /// depending on the template parameter `Scalar` defined. /// ComplexMatrix eigenvectors(Index nvec) const { const Index nconv = m_ritz_conv.cast<Index>().sum(); nvec = (std::min)(nvec, nconv); ComplexMatrix res(m_n, nvec); if (!nvec) return res; ComplexMatrix ritz_vec_conv(m_ncv, nvec); Index j = 0; for (Index i = 0; i < m_nev && j < nvec; i++) { if (m_ritz_conv[i]) { ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); j++; } } res.noalias() = m_fac.matrix_V() * ritz_vec_conv; return res; } /// /// Returns all converged eigenvectors. /// ComplexMatrix eigenvectors() const { return eigenvectors(m_nev); } }; } // namespace Spectra #endif // SPECTRA_GEN_EIGS_BASE_H
18,186
33.121951
105
h
abess
abess-master/include/Spectra/GenEigsComplexShiftSolver.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H #define SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H #include <Eigen/Core> #include "GenEigsBase.h" #include "Util/SelectionRule.h" #include "MatOp/DenseGenComplexShiftSolve.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// This class implements the eigen solver for general real matrices with /// a complex shift value in the **shift-and-invert mode**. The background /// knowledge of the shift-and-invert mode can be found in the documentation /// of the SymEigsShiftSolver class. /// /// \tparam OpType The name of the matrix operation class. Users could either /// use the wrapper classes such as DenseGenComplexShiftSolve and /// SparseGenComplexShiftSolve, or define their own that implements the type /// definition `Scalar` and all the public member functions as in /// DenseGenComplexShiftSolve. 
/// template <typename OpType = DenseGenComplexShiftSolve<double>> class GenEigsComplexShiftSolver : public GenEigsBase<OpType, IdentityBOp> { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Complex = std::complex<Scalar>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using ComplexArray = Eigen::Array<Complex, Eigen::Dynamic, 1>; using Base = GenEigsBase<OpType, IdentityBOp>; using Base::m_op; using Base::m_n; using Base::m_nev; using Base::m_fac; using Base::m_ritz_val; using Base::m_ritz_vec; const Scalar m_sigmar; const Scalar m_sigmai; // First transform back the Ritz values, and then sort void sort_ritzpair(SortRule sort_rule) override { using std::abs; using std::sqrt; using std::norm; // The eigenvalues we get from the iteration is // nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma))) // So the eigenvalues of the original problem is // 1 \pm sqrt(1 - 4 * nu^2 * sigmai^2) // lambda = sigmar + ----------------------------------- // 2 * nu // We need to pick the correct root // Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj // and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj // where r is any shift value. // We can use this identity to determine lambdaj // // op(v) computes Re(inv(A - r * I) * v) for any real v // If r is real, then op(v) is also real. 
Let a = Re(vj), b = Im(vj), // then op(vj) = op(a) + op(b) * i // By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine // which one is the correct root // Select a random shift value SimpleRandom<Scalar> rng(0); const Scalar shiftr = rng.random() * m_sigmar + rng.random(); const Complex shift = Complex(shiftr, Scalar(0)); m_op.set_shift(shiftr, Scalar(0)); // Calculate inv(A - r * I) * vj Vector v_real(m_n), v_imag(m_n), OPv_real(m_n), OPv_imag(m_n); constexpr Scalar eps = TypeTraits<Scalar>::epsilon(); for (Index i = 0; i < m_nev; i++) { v_real.noalias() = m_fac.matrix_V() * m_ritz_vec.col(i).real(); v_imag.noalias() = m_fac.matrix_V() * m_ritz_vec.col(i).imag(); m_op.perform_op(v_real.data(), OPv_real.data()); m_op.perform_op(v_imag.data(), OPv_imag.data()); // Two roots computed from the quadratic equation const Complex nu = m_ritz_val[i]; const Complex root_part1 = m_sigmar + Scalar(0.5) / nu; const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu; const Complex root1 = root_part1 + root_part2; const Complex root2 = root_part1 - root_part2; // Test roots Scalar err1 = Scalar(0), err2 = Scalar(0); for (int k = 0; k < m_n; k++) { const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift); const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift); const Complex OPv = Complex(OPv_real[k], OPv_imag[k]); err1 += norm(OPv - rhs1); err2 += norm(OPv - rhs2); } const Complex lambdaj = (err1 < err2) ? root1 : root2; m_ritz_val[i] = lambdaj; if (abs(Eigen::numext::imag(lambdaj)) > eps) { m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj); i++; } else { m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0)); } } Base::sort_ritzpair(sort_rule); } public: /// /// Constructor to create a eigen solver object using the shift-and-invert mode. 
/// /// \param op The matrix operation object that implements /// the complex shift-solve operation of \f$A\f$: calculating /// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. Users could either /// create the object from the wrapper class such as DenseGenComplexShiftSolve, or /// define their own that implements all the public members /// as in DenseGenComplexShiftSolve. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. /// \param sigmar The real part of the shift. /// \param sigmai The imaginary part of the shift. /// GenEigsComplexShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) : Base(op, IdentityBOp(), nev, ncv), m_sigmar(sigmar), m_sigmai(sigmai) { op.set_shift(m_sigmar, m_sigmai); } }; } // namespace Spectra #endif // SPECTRA_GEN_EIGS_COMPLEX_SHIFT_SOLVER_H
6,755
41.225
120
h
abess
abess-master/include/Spectra/GenEigsRealShiftSolver.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H #define SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H #include <Eigen/Core> #include "GenEigsBase.h" #include "Util/SelectionRule.h" #include "MatOp/DenseGenRealShiftSolve.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// This class implements the eigen solver for general real matrices with /// a real shift value in the **shift-and-invert mode**. The background /// knowledge of the shift-and-invert mode can be found in the documentation /// of the SymEigsShiftSolver class. /// /// \tparam OpType The name of the matrix operation class. Users could either /// use the wrapper classes such as DenseGenRealShiftSolve and /// SparseGenRealShiftSolve, or define their own that implements the type /// definition `Scalar` and all the public member functions as in /// DenseGenRealShiftSolve. /// template <typename OpType = DenseGenRealShiftSolve<double>> class GenEigsRealShiftSolver : public GenEigsBase<OpType, IdentityBOp> { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Complex = std::complex<Scalar>; using ComplexArray = Eigen::Array<Complex, Eigen::Dynamic, 1>; using Base = GenEigsBase<OpType, IdentityBOp>; using Base::m_nev; using Base::m_ritz_val; const Scalar m_sigma; // First transform back the Ritz values, and then sort void sort_ritzpair(SortRule sort_rule) override { // The eigenvalues we get from the iteration is nu = 1 / (lambda - sigma) // So the eigenvalues of the original problem is lambda = 1 / nu + sigma m_ritz_val.head(m_nev) = Scalar(1) / m_ritz_val.head(m_nev).array() + m_sigma; Base::sort_ritzpair(sort_rule); } public: /// /// Constructor to create a eigen solver object using the shift-and-invert mode. 
/// /// \param op The matrix operation object that implements /// the shift-solve operation of \f$A\f$: calculating /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either /// create the object from the wrapper class such as DenseGenRealShiftSolve, or /// define their own that implements all the public members /// as in DenseGenRealShiftSolve. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. /// \param sigma The real-valued shift. /// GenEigsRealShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigma) : Base(op, IdentityBOp(), nev, ncv), m_sigma(sigma) { op.set_shift(m_sigma); } }; } // namespace Spectra #endif // SPECTRA_GEN_EIGS_REAL_SHIFT_SOLVER_H
3,518
39.918605
98
h
abess
abess-master/include/Spectra/GenEigsSolver.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_GEN_EIGS_SOLVER_H #define SPECTRA_GEN_EIGS_SOLVER_H #include <Eigen/Core> #include "GenEigsBase.h" #include "Util/SelectionRule.h" #include "MatOp/DenseGenMatProd.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// This class implements the eigen solver for general real matrices, i.e., /// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix. /// /// Most of the background information documented in the SymEigsSolver class /// also applies to the GenEigsSolver class here, except that the eigenvalues /// and eigenvectors of a general matrix can now be complex-valued. /// /// \tparam OpType The name of the matrix operation class. Users could either /// use the wrapper classes such as DenseGenMatProd and /// SparseGenMatProd, or define their own that implements the type /// definition `Scalar` and all the public member functions as in /// DenseGenMatProd. 
/// /// An example that illustrates the usage of GenEigsSolver is give below: /// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Spectra/GenEigsSolver.h> /// // <Spectra/MatOp/DenseGenMatProd.h> is implicitly included /// #include <iostream> /// /// using namespace Spectra; /// /// int main() /// { /// // We are going to calculate the eigenvalues of M /// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10); /// /// // Construct matrix operation object using the wrapper class /// DenseGenMatProd<double> op(M); /// /// // Construct eigen solver object, requesting the largest /// // (in magnitude, or norm) three eigenvalues /// GenEigsSolver<DenseGenMatProd<double>> eigs(op, 3, 6); /// /// // Initialize and compute /// eigs.init(); /// int nconv = eigs.compute(SortRule::LargestMagn); /// /// // Retrieve results /// Eigen::VectorXcd evalues; /// if (eigs.info() == CompInfo::Successful) /// evalues = eigs.eigenvalues(); /// /// std::cout << "Eigenvalues found:\n" << evalues << std::endl; /// /// return 0; /// } /// \endcode /// /// And also an example for sparse matrices: /// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Eigen/SparseCore> /// #include <Spectra/GenEigsSolver.h> /// #include <Spectra/MatOp/SparseGenMatProd.h> /// #include <iostream> /// /// using namespace Spectra; /// /// int main() /// { /// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal, /// // and 3 on the above-main subdiagonal /// const int n = 10; /// Eigen::SparseMatrix<double> M(n, n); /// M.reserve(Eigen::VectorXi::Constant(n, 3)); /// for (int i = 0; i < n; i++) /// { /// M.insert(i, i) = 1.0; /// if (i > 0) /// M.insert(i - 1, i) = 3.0; /// if (i < n - 1) /// M.insert(i + 1, i) = 2.0; /// } /// /// // Construct matrix operation object using the wrapper class SparseGenMatProd /// SparseGenMatProd<double> op(M); /// /// // Construct eigen solver object, requesting the largest three eigenvalues /// GenEigsSolver<SparseGenMatProd<double>> eigs(op, 
3, 6); /// /// // Initialize and compute /// eigs.init(); /// int nconv = eigs.compute(SortRule::LargestMagn); /// /// // Retrieve results /// Eigen::VectorXcd evalues; /// if (eigs.info() == CompInfo::Successful) /// evalues = eigs.eigenvalues(); /// /// std::cout << "Eigenvalues found:\n" << evalues << std::endl; /// /// return 0; /// } /// \endcode template <typename OpType = DenseGenMatProd<double>> class GenEigsSolver : public GenEigsBase<OpType, IdentityBOp> { private: using Index = Eigen::Index; public: /// /// Constructor to create a solver object. /// /// \param op The matrix operation object that implements /// the matrix-vector multiplication operation of \f$A\f$: /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either /// create the object from the wrapper class such as DenseGenMatProd, or /// define their own that implements all the public members /// as in DenseGenMatProd. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. /// GenEigsSolver(OpType& op, Index nev, Index ncv) : GenEigsBase<OpType, IdentityBOp>(op, IdentityBOp(), nev, ncv) {} }; } // namespace Spectra #endif // SPECTRA_GEN_EIGS_SOLVER_H
5,233
33.893333
96
h
abess
abess-master/include/Spectra/JDSymEigsBase.h
// Copyright (C) 2020 Netherlands eScience Center <J.Wehner@esciencecenter.nl> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_JD_SYM_EIGS_BASE_H #define SPECTRA_JD_SYM_EIGS_BASE_H #include <Eigen/Core> #include <vector> // std::vector #include <cmath> // std::abs, std::pow #include <algorithm> // std::min #include <stdexcept> // std::invalid_argument #include <iostream> #include "Util/SelectionRule.h" #include "Util/CompInfo.h" #include "LinAlg/SearchSpace.h" #include "LinAlg/RitzPairs.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// This is the base class for symmetric JD eigen solvers, mainly for internal use. /// It is kept here to provide the documentation for member functions of concrete eigen solvers /// such as DavidsonSymEigsSolver. /// /// This class uses the CRTP method to call functions from the derived class. /// template <typename Derived, typename OpType> class JDSymEigsBase { protected: using Index = Eigen::Index; using Scalar = typename OpType::Scalar; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; const OpType& m_matrix_operator; // object to conduct matrix operation, // e.g. 
matrix-vector product Index niter_ = 0; const Index m_number_eigenvalues; // number of eigenvalues requested Index m_max_search_space_size; Index m_initial_search_space_size; Index m_correction_size; // how many correction vectors are added in each iteration RitzPairs<Scalar> m_ritz_pairs; // Ritz eigen pair structure SearchSpace<Scalar> m_search_space; // search space private: CompInfo m_info = CompInfo::NotComputed; // status of the computation void check_argument() const { if (m_number_eigenvalues < 1 || m_number_eigenvalues > m_matrix_operator.cols() - 1) throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix"); } public: JDSymEigsBase(OpType& op, Index nev) : m_matrix_operator(op), m_number_eigenvalues(nev), m_max_search_space_size(10 * m_number_eigenvalues), m_initial_search_space_size(2 * m_number_eigenvalues), m_correction_size(m_number_eigenvalues) { check_argument(); //TODO better input validation and checks if (op.cols() < m_max_search_space_size) { m_max_search_space_size = op.cols(); } if (op.cols() < m_initial_search_space_size + m_correction_size) { m_initial_search_space_size = op.cols() / 3; m_correction_size = op.cols() / 3; } } /// /// Sets the Maxmium SearchspaceSize after which is deflated /// void set_max_search_space_size(Index max_search_space_size) { m_max_search_space_size = max_search_space_size; } /// /// Sets how many correction vectors are added in each iteration /// void set_correction_size(Index correction_size) { m_correction_size = correction_size; } /// /// Sets the Initial SearchspaceSize for Ritz values /// void set_initial_search_space_size(Index initial_search_space_size) { m_initial_search_space_size = initial_search_space_size; } /// /// Virtual destructor /// virtual ~JDSymEigsBase() {} /// /// Returns the status of the computation. /// The full list of enumeration values can be found in \ref Enumerations. 
/// CompInfo info() const { return m_info; } /// /// Returns the number of iterations used in the computation. /// Index num_iterations() const { return niter_; } Vector eigenvalues() const { return m_ritz_pairs.ritz_values().head(m_number_eigenvalues); } Matrix eigenvectors() const { return m_ritz_pairs.ritz_vectors().leftCols(m_number_eigenvalues); } Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 100, Scalar tol = 100 * Eigen::NumTraits<Scalar>::dummy_precision()) { Derived& derived = static_cast<Derived&>(*this); Matrix intial_space = derived.setup_initial_search_space(selection); return compute_with_guess(intial_space, selection, maxit, tol); } Index compute_with_guess(const Eigen::Ref<const Matrix>& initial_space, SortRule selection = SortRule::LargestMagn, Index maxit = 100, Scalar tol = 100 * Eigen::NumTraits<Scalar>::dummy_precision()) { m_search_space.initialize_search_space(initial_space); niter_ = 0; for (niter_ = 0; niter_ < maxit; niter_++) { bool do_restart = (m_search_space.size() > m_max_search_space_size); if (do_restart) { m_search_space.restart(m_ritz_pairs, m_initial_search_space_size); } m_search_space.update_operator_basis_product(m_matrix_operator); Eigen::ComputationInfo small_problem_info = m_ritz_pairs.compute_eigen_pairs(m_search_space); if (small_problem_info != Eigen::ComputationInfo::Success) { m_info = CompInfo::NumericalIssue; break; } m_ritz_pairs.sort(selection); bool converged = m_ritz_pairs.check_convergence(tol, m_number_eigenvalues); if (converged) { m_info = CompInfo::Successful; break; } else if (niter_ == maxit - 1) { m_info = CompInfo::NotConverging; break; } Derived& derived = static_cast<Derived&>(*this); Matrix corr_vect = derived.calculate_correction_vector(); m_search_space.extend_basis(corr_vect); } return (m_ritz_pairs.converged_eigenvalues()).template cast<Index>().head(m_number_eigenvalues).sum(); } }; } // namespace Spectra #endif // SPECTRA_JD_SYM_EIGS_BASE_H
6,303
33.26087
110
h
abess
abess-master/include/Spectra/SymEigsBase.h
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_EIGS_BASE_H #define SPECTRA_SYM_EIGS_BASE_H #include <Eigen/Core> #include <vector> // std::vector #include <cmath> // std::abs, std::pow #include <algorithm> // std::min #include <stdexcept> // std::invalid_argument #include <utility> // std::move #include "Util/Version.h" #include "Util/TypeTraits.h" #include "Util/SelectionRule.h" #include "Util/CompInfo.h" #include "Util/SimpleRandom.h" #include "MatOp/internal/ArnoldiOp.h" #include "LinAlg/UpperHessenbergQR.h" #include "LinAlg/TridiagEigen.h" #include "LinAlg/Lanczos.h" namespace Spectra { /// /// \defgroup EigenSolver Eigen Solvers /// /// Eigen solvers for different types of problems. /// /// /// \ingroup EigenSolver /// /// This is the base class for symmetric eigen solvers, mainly for internal use. /// It is kept here to provide the documentation for member functions of concrete eigen solvers /// such as SymEigsSolver and SymEigsShiftSolver. /// template <typename OpType, typename BOpType> class SymEigsBase { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>; using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>; using MapMat = Eigen::Map<Matrix>; using MapVec = Eigen::Map<Vector>; using MapConstVec = Eigen::Map<const Vector>; using ArnoldiOpType = ArnoldiOp<Scalar, OpType, BOpType>; using LanczosFac = Lanczos<Scalar, ArnoldiOpType>; protected: // clang-format off // In SymEigsSolver and SymEigsShiftSolver, the A operator is an lvalue provided by // the user. In SymGEigsSolver, the A operator is an rvalue. 
To avoid copying objects, // we use the following scheme: // 1. If the op parameter in the constructor is an lvalue, make m_op a const reference to op // 2. If op is an rvalue, move op to m_op_container, and then make m_op a const // reference to m_op_container[0] std::vector<OpType> m_op_container; const OpType& m_op; // matrix operator for A const Index m_n; // dimension of matrix A const Index m_nev; // number of eigenvalues requested const Index m_ncv; // dimension of Krylov subspace in the Lanczos method Index m_nmatop; // number of matrix operations called Index m_niter; // number of restarting iterations LanczosFac m_fac; // Lanczos factorization Vector m_ritz_val; // Ritz values private: Matrix m_ritz_vec; // Ritz vectors Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates BoolArray m_ritz_conv; // indicator of the convergence of Ritz values CompInfo m_info; // status of the computation // clang-format on // Move rvalue object to the container static std::vector<OpType> create_op_container(OpType&& rval) { std::vector<OpType> container; container.emplace_back(std::move(rval)); return container; } // Implicitly restarted Lanczos factorization void restart(Index k, SortRule selection) { using std::abs; if (k >= m_ncv) return; TridiagQR<Scalar> decomp(m_ncv); Matrix Q = Matrix::Identity(m_ncv, m_ncv); // Apply large shifts first const int nshift = m_ncv - k; Vector shifts = m_ritz_val.tail(nshift); std::sort(shifts.data(), shifts.data() + nshift, [](const Scalar& v1, const Scalar& v2) { return abs(v1) > abs(v2); }); for (Index i = 0; i < nshift; i++) { // QR decomposition of H-mu*I, mu is the shift decomp.compute(m_fac.matrix_H(), shifts[i]); // Q -> Q * Qi decomp.apply_YQ(Q); // H -> Q'HQ // Since QR = H - mu * I, we have H = QR + mu * I // and therefore Q'HQ = RQ + mu * I m_fac.compress_H(decomp); } m_fac.compress_V(Q); m_fac.factorize_from(k, m_ncv, m_nmatop); retrieve_ritzpair(selection); } // Calculates the number of converged 
Ritz values Index num_converged(const Scalar& tol) { using std::pow; // The machine precision, ~= 1e-16 for the "double" type constexpr Scalar eps = TypeTraits<Scalar>::epsilon(); // std::pow() is not constexpr, so we do not declare eps23 to be constexpr // But most compilers should be able to compute eps23 at compile time const Scalar eps23 = pow(eps, Scalar(2) / 3); // thresh = tol * max(eps23, abs(theta)), theta for Ritz value Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(eps23); Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); // Converged "wanted" Ritz values m_ritz_conv = (resid < thresh); return m_ritz_conv.count(); } // Returns the adjusted nev for restarting Index nev_adjusted(Index nconv) { using std::abs; // A very small value, but 1.0 / near_0 does not overflow // ~= 1e-307 for the "double" type constexpr Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10); Index nev_new = m_nev; for (Index i = m_nev; i < m_ncv; i++) if (abs(m_ritz_est[i]) < near_0) nev_new++; // Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK nev_new += (std::min)(nconv, (m_ncv - nev_new) / 2); if (nev_new == 1 && m_ncv >= 6) nev_new = m_ncv / 2; else if (nev_new == 1 && m_ncv > 2) nev_new = 2; if (nev_new > m_ncv - 1) nev_new = m_ncv - 1; return nev_new; } // Retrieves and sorts Ritz values and Ritz vectors void retrieve_ritzpair(SortRule selection) { TridiagEigen<Scalar> decomp(m_fac.matrix_H()); const Vector& evals = decomp.eigenvalues(); const Matrix& evecs = decomp.eigenvectors(); // Sort Ritz values and put the wanted ones at the beginning std::vector<Index> ind = argsort(selection, evals, m_ncv); // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively for (Index i = 0; i < m_ncv; i++) { m_ritz_val[i] = evals[ind[i]]; m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); } for (Index i = 0; i < m_nev; i++) { m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); } } protected: // Sorts the first nev Ritz pairs in the 
specified order // This is used to return the final results virtual void sort_ritzpair(SortRule sort_rule) { if ((sort_rule != SortRule::LargestAlge) && (sort_rule != SortRule::LargestMagn) && (sort_rule != SortRule::SmallestAlge) && (sort_rule != SortRule::SmallestMagn)) throw std::invalid_argument("unsupported sorting rule"); std::vector<Index> ind = argsort(sort_rule, m_ritz_val, m_nev); Vector new_ritz_val(m_ncv); Matrix new_ritz_vec(m_ncv, m_nev); BoolArray new_ritz_conv(m_nev); for (Index i = 0; i < m_nev; i++) { new_ritz_val[i] = m_ritz_val[ind[i]]; new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); new_ritz_conv[i] = m_ritz_conv[ind[i]]; } m_ritz_val.swap(new_ritz_val); m_ritz_vec.swap(new_ritz_vec); m_ritz_conv.swap(new_ritz_conv); } public: /// \cond // If op is an lvalue SymEigsBase(OpType& op, const BOpType& Bop, Index nev, Index ncv) : m_op(op), m_n(op.rows()), m_nev(nev), m_ncv(ncv > m_n ? m_n : ncv), m_nmatop(0), m_niter(0), m_fac(ArnoldiOpType(op, Bop), m_ncv), m_info(CompInfo::NotComputed) { if (nev < 1 || nev > m_n - 1) throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix"); if (ncv <= nev || ncv > m_n) throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix"); } // If op is an rvalue SymEigsBase(OpType&& op, const BOpType& Bop, Index nev, Index ncv) : m_op_container(create_op_container(std::move(op))), m_op(m_op_container.front()), m_n(m_op.rows()), m_nev(nev), m_ncv(ncv > m_n ? m_n : ncv), m_nmatop(0), m_niter(0), m_fac(ArnoldiOpType(m_op, Bop), m_ncv), m_info(CompInfo::NotComputed) { if (nev < 1 || nev > m_n - 1) throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix"); if (ncv <= nev || ncv > m_n) throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix"); } /// /// Virtual destructor /// virtual ~SymEigsBase() {} /// \endcond /// /// Initializes the solver by providing an initial residual vector. 
/// /// \param init_resid Pointer to the initial residual vector. /// /// **Spectra** (and also **ARPACK**) uses an iterative algorithm /// to find eigenvalues. This function allows the user to provide the initial /// residual vector. /// void init(const Scalar* init_resid) { // Reset all matrices/vectors to zero m_ritz_val.resize(m_ncv); m_ritz_vec.resize(m_ncv, m_nev); m_ritz_est.resize(m_ncv); m_ritz_conv.resize(m_nev); m_ritz_val.setZero(); m_ritz_vec.setZero(); m_ritz_est.setZero(); m_ritz_conv.setZero(); m_nmatop = 0; m_niter = 0; // Initialize the Lanczos factorization MapConstVec v0(init_resid, m_n); m_fac.init(v0, m_nmatop); } /// /// Initializes the solver by providing a random initial residual vector. /// /// This overloaded function generates a random initial residual vector /// (with a fixed random seed) for the algorithm. Elements in the vector /// follow independent Uniform(-0.5, 0.5) distribution. /// void init() { SimpleRandom<Scalar> rng(0); Vector init_resid = rng.random_vec(m_n); init(init_resid.data()); } /// /// Conducts the major computation procedure. /// /// \param selection An enumeration value indicating the selection rule of /// the requested eigenvalues, for example `SortRule::LargestMagn` /// to retrieve eigenvalues with the largest magnitude. /// The full list of enumeration values can be found in /// \ref Enumerations. /// \param maxit Maximum number of iterations allowed in the algorithm. /// \param tol Precision parameter for the calculated eigenvalues. /// \param sorting Rule to sort the eigenvalues and eigenvectors. /// Supported values are /// `SortRule::LargestAlge`, `SortRule::LargestMagn`, /// `SortRule::SmallestAlge`, and `SortRule::SmallestMagn`. /// For example, `SortRule::LargestAlge` indicates that largest eigenvalues /// come first. Note that this argument is only used to /// **sort** the final result, and the **selection** rule /// (e.g. 
selecting the largest or smallest eigenvalues in the /// full spectrum) is specified by the parameter `selection`. /// /// \return Number of converged eigenvalues. /// Index compute(SortRule selection = SortRule::LargestMagn, Index maxit = 1000, Scalar tol = 1e-10, SortRule sorting = SortRule::LargestAlge) { // The m-step Lanczos factorization m_fac.factorize_from(1, m_ncv, m_nmatop); retrieve_ritzpair(selection); // Restarting Index i, nconv = 0, nev_adj; for (i = 0; i < maxit; i++) { nconv = num_converged(tol); if (nconv >= m_nev) break; nev_adj = nev_adjusted(nconv); restart(nev_adj, selection); } // Sorting results sort_ritzpair(sorting); m_niter += i + 1; m_info = (nconv >= m_nev) ? CompInfo::Successful : CompInfo::NotConverging; return (std::min)(m_nev, nconv); } /// /// Returns the status of the computation. /// The full list of enumeration values can be found in \ref Enumerations. /// CompInfo info() const { return m_info; } /// /// Returns the number of iterations used in the computation. /// Index num_iterations() const { return m_niter; } /// /// Returns the number of matrix operations used in the computation. /// Index num_operations() const { return m_nmatop; } /// /// Returns the converged eigenvalues. /// /// \return A vector containing the eigenvalues. /// Returned vector type will be `Eigen::Vector<Scalar, ...>`, depending on /// the template parameter `Scalar` defined. /// Vector eigenvalues() const { const Index nconv = m_ritz_conv.count(); Vector res(nconv); if (!nconv) return res; Index j = 0; for (Index i = 0; i < m_nev; i++) { if (m_ritz_conv[i]) { res[j] = m_ritz_val[i]; j++; } } return res; } /// /// Returns the eigenvectors associated with the converged eigenvalues. /// /// \param nvec The number of eigenvectors to return. /// /// \return A matrix containing the eigenvectors. /// Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, /// depending on the template parameter `Scalar` defined. 
/// virtual Matrix eigenvectors(Index nvec) const { const Index nconv = m_ritz_conv.count(); nvec = (std::min)(nvec, nconv); Matrix res(m_n, nvec); if (!nvec) return res; Matrix ritz_vec_conv(m_ncv, nvec); Index j = 0; for (Index i = 0; i < m_nev && j < nvec; i++) { if (m_ritz_conv[i]) { ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); j++; } } res.noalias() = m_fac.matrix_V() * ritz_vec_conv; return res; } /// /// Returns all converged eigenvectors. /// virtual Matrix eigenvectors() const { return eigenvectors(m_nev); } }; } // namespace Spectra #endif // SPECTRA_SYM_EIGS_BASE_H
15,129
32.325991
127
h
abess
abess-master/include/Spectra/SymEigsShiftSolver.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_EIGS_SHIFT_SOLVER_H #define SPECTRA_SYM_EIGS_SHIFT_SOLVER_H #include <Eigen/Core> #include "SymEigsBase.h" #include "Util/SelectionRule.h" #include "MatOp/DenseSymShiftSolve.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// This class implements the eigen solver for real symmetric matrices using /// the **shift-and-invert mode**. The background information of the symmetric /// eigen solver is documented in the SymEigsSolver class. Here we focus on /// explaining the shift-and-invert mode. /// /// The shift-and-invert mode is based on the following fact: /// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of /// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$, /// we have /// \f[(A-\sigma I)^{-1}x=\nu x\f] /// where /// \f[\nu=\frac{1}{\lambda-\sigma}\f] /// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix /// \f$(A-\sigma I)^{-1}\f$. /// /// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$ /// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired /// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting /// that \f$\lambda=\sigma+\nu^{-1}\f$. /// /// The reason why we need this type of manipulation is that /// the algorithm of **Spectra** (and also **ARPACK**) /// is good at finding eigenvalues with large magnitude, but may fail in looking /// for eigenvalues that are close to zero. However, if we really need them, we /// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then /// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$ /// implies smallest values of \f$\lambda\f$. 
/// /// To summarize, in the shift-and-invert mode, the selection rule will apply to /// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule /// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of /// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues() /// method will always return the eigenvalues in the original problem (i.e., /// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the /// same for both the original problem and the shifted-and-inverted problem. /// /// \tparam OpType The name of the matrix operation class. Users could either /// use the wrapper classes such as DenseSymShiftSolve and /// SparseSymShiftSolve, or define their own that implements the type /// definition `Scalar` and all the public member functions as in /// DenseSymShiftSolve. /// /// Below is an example that illustrates the use of the shift-and-invert mode: /// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Spectra/SymEigsShiftSolver.h> /// // <Spectra/MatOp/DenseSymShiftSolve.h> is implicitly included /// #include <iostream> /// /// using namespace Spectra; /// /// int main() /// { /// // A size-10 diagonal matrix with elements 1, 2, ..., 10 /// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10); /// for (int i = 0; i < M.rows(); i++) /// M(i, i) = i + 1; /// /// // Construct matrix operation object using the wrapper class /// DenseSymShiftSolve<double> op(M); /// /// // Construct eigen solver object with shift 0 /// // This will find eigenvalues that are closest to 0 /// SymEigsShiftSolver<DenseSymShiftSolve<double>> eigs(op, 3, 6, 0.0); /// /// eigs.init(); /// eigs.compute(SortRule::LargestMagn); /// if (eigs.info() == CompInfo::Successful) /// { /// Eigen::VectorXd evalues = eigs.eigenvalues(); /// // Will get (3.0, 2.0, 1.0) /// std::cout << "Eigenvalues found:\n" << evalues << std::endl; /// } /// /// return 0; /// } /// \endcode /// /// Also an example for user-supplied matrix 
shift-solve operation class: /// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Spectra/SymEigsShiftSolver.h> /// #include <iostream> /// /// using namespace Spectra; /// /// // M = diag(1, 2, ..., 10) /// class MyDiagonalTenShiftSolve /// { /// private: /// double sigma_; /// public: /// using Scalar = double; // A typedef named "Scalar" is required /// int rows() { return 10; } /// int cols() { return 10; } /// void set_shift(double sigma) { sigma_ = sigma; } /// // y_out = inv(A - sigma * I) * x_in /// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...) /// void perform_op(double *x_in, double *y_out) const /// { /// for (int i = 0; i < rows(); i++) /// { /// y_out[i] = x_in[i] / (i + 1 - sigma_); /// } /// } /// }; /// /// int main() /// { /// MyDiagonalTenShiftSolve op; /// // Find three eigenvalues that are closest to 3.14 /// SymEigsShiftSolver<MyDiagonalTenShiftSolve> eigs(op, 3, 6, 3.14); /// eigs.init(); /// eigs.compute(SortRule::LargestMagn); /// if (eigs.info() == CompInfo::Successful) /// { /// Eigen::VectorXd evalues = eigs.eigenvalues(); /// // Will get (4.0, 3.0, 2.0) /// std::cout << "Eigenvalues found:\n" << evalues << std::endl; /// } /// /// return 0; /// } /// \endcode /// template <typename OpType = DenseSymShiftSolve<double>> class SymEigsShiftSolver : public SymEigsBase<OpType, IdentityBOp> { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>; using Base = SymEigsBase<OpType, IdentityBOp>; using Base::m_nev; using Base::m_ritz_val; const Scalar m_sigma; // First transform back the Ritz values, and then sort void sort_ritzpair(SortRule sort_rule) override { // The eigenvalues we get from the iteration is nu = 1 / (lambda - sigma) // So the eigenvalues of the original problem is lambda = 1 / nu + sigma m_ritz_val.head(m_nev).array() = Scalar(1) / m_ritz_val.head(m_nev).array() + m_sigma; Base::sort_ritzpair(sort_rule); } public: /// /// 
Constructor to create a eigen solver object using the shift-and-invert mode. /// /// \param op The matrix operation object that implements /// the shift-solve operation of \f$A\f$: calculating /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either /// create the object from the wrapper class such as DenseSymShiftSolve, or /// define their own that implements all the public members /// as in DenseSymShiftSolve. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv_` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev\f$. /// \param sigma The value of the shift. /// SymEigsShiftSolver(OpType& op, Index nev, Index ncv, const Scalar& sigma) : Base(op, IdentityBOp(), nev, ncv), m_sigma(sigma) { op.set_shift(m_sigma); } }; } // namespace Spectra #endif // SPECTRA_SYM_EIGS_SHIFT_SOLVER_H
7,762
37.621891
98
h
abess
abess-master/include/Spectra/SymEigsSolver.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_EIGS_SOLVER_H #define SPECTRA_SYM_EIGS_SOLVER_H #include <Eigen/Core> #include "SymEigsBase.h" #include "Util/SelectionRule.h" #include "MatOp/DenseSymMatProd.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// This class implements the eigen solver for real symmetric matrices, i.e., /// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric. /// /// **Spectra** is designed to calculate a specified number (\f$k\f$) /// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much /// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues /// and eigenvectors are computed. /// /// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires /// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of /// this solver need to supply a class that computes the result of \f$Av\f$ /// for any given vector \f$v\f$. The name of this class should be given to /// the template parameter `OpType`, and instance of this class passed to /// the constructor of SymEigsSolver. /// /// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**, /// for example `Eigen::MatrixXd`, then there is an easy way to construct such a /// matrix operation class, by using the built-in wrapper class DenseSymMatProd /// that wraps an existing matrix object in **Eigen**. This is also the /// default template parameter for SymEigsSolver. For sparse matrices, the /// wrapper class SparseSymMatProd can be used similarly. 
/// /// If the users need to define their own matrix-vector multiplication operation /// class, it should define a public type `Scalar` to indicate the element type, /// and implement all the public member functions as in DenseSymMatProd. /// /// \tparam OpType The name of the matrix operation class. Users could either /// use the wrapper classes such as DenseSymMatProd and /// SparseSymMatProd, or define their own that implements the type /// definition `Scalar` and all the public member functions as in /// DenseSymMatProd. /// /// Below is an example that demonstrates the usage of this class. /// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Spectra/SymEigsSolver.h> /// // <Spectra/MatOp/DenseSymMatProd.h> is implicitly included /// #include <iostream> /// /// using namespace Spectra; /// /// int main() /// { /// // We are going to calculate the eigenvalues of M /// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10); /// Eigen::MatrixXd M = A + A.transpose(); /// /// // Construct matrix operation object using the wrapper class DenseSymMatProd /// DenseSymMatProd<double> op(M); /// /// // Construct eigen solver object, requesting the largest three eigenvalues /// SymEigsSolver<DenseSymMatProd<double>> eigs(op, 3, 6); /// /// // Initialize and compute /// eigs.init(); /// int nconv = eigs.compute(SortRule::LargestAlge); /// /// // Retrieve results /// Eigen::VectorXd evalues; /// if (eigs.info() == CompInfo::Successful) /// evalues = eigs.eigenvalues(); /// /// std::cout << "Eigenvalues found:\n" << evalues << std::endl; /// /// return 0; /// } /// \endcode /// /// And here is an example for user-supplied matrix operation class. 
/// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Spectra/SymEigsSolver.h> /// #include <iostream> /// /// using namespace Spectra; /// /// // M = diag(1, 2, ..., 10) /// class MyDiagonalTen /// { /// public: /// using Scalar = double; // A typedef named "Scalar" is required /// int rows() { return 10; } /// int cols() { return 10; } /// // y_out = M * x_in /// void perform_op(double *x_in, double *y_out) const /// { /// for (int i = 0; i < rows(); i++) /// { /// y_out[i] = x_in[i] * (i + 1); /// } /// } /// }; /// /// int main() /// { /// MyDiagonalTen op; /// SymEigsSolver<MyDiagonalTen> eigs(op, 3, 6); /// eigs.init(); /// eigs.compute(SortRule::LargestAlge); /// if (eigs.info() == CompInfo::Successful) /// { /// Eigen::VectorXd evalues = eigs.eigenvalues(); /// // Will get (10, 9, 8) /// std::cout << "Eigenvalues found:\n" << evalues << std::endl; /// } /// /// return 0; /// } /// \endcode /// template <typename OpType = DenseSymMatProd<double>> class SymEigsSolver : public SymEigsBase<OpType, IdentityBOp> { private: using Index = Eigen::Index; public: /// /// Constructor to create a solver object. /// /// \param op The matrix operation object that implements /// the matrix-vector multiplication operation of \f$A\f$: /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either /// create the object from the wrapper class such as DenseSymMatProd, or /// define their own that implements all the public members /// as in DenseSymMatProd. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev\f$. 
/// SymEigsSolver(OpType& op, Index nev, Index ncv) : SymEigsBase<OpType, IdentityBOp>(op, IdentityBOp(), nev, ncv) {} }; } // namespace Spectra #endif // SPECTRA_SYM_EIGS_SOLVER_H
6,053
35.690909
96
h
abess
abess-master/include/Spectra/SymGEigsShiftSolver.h
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H #define SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H #include <utility> // std::move #include "SymEigsBase.h" #include "Util/GEigsMode.h" #include "MatOp/internal/SymGEigsShiftInvertOp.h" #include "MatOp/internal/SymGEigsBucklingOp.h" #include "MatOp/internal/SymGEigsCayleyOp.h" namespace Spectra { /// /// \ingroup GEigenSolver /// /// This class implements the generalized eigen solver for real symmetric /// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ and \f$B\f$ are symmetric /// matrices. A spectral transform is applied to seek interior /// generalized eigenvalues with respect to some shift \f$\sigma\f$. /// /// There are different modes of this solver, specified by the template parameter `Mode`. /// See the pages for the specialized classes for details. /// - The shift-and-invert mode transforms the problem into \f$(A-\sigma B)^{-1}Bx=\nu x\f$, /// where \f$\nu=1/(\lambda-\sigma)\f$. This mode assumes that \f$B\f$ is positive definite. /// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert> /// "SymGEigsShiftSolver (Shift-and-invert mode)" for more details. /// - The buckling mode transforms the problem into \f$(A-\sigma B)^{-1}Ax=\nu x\f$, /// where \f$\nu=\lambda/(\lambda-\sigma)\f$. This mode assumes that \f$A\f$ is positive definite. /// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling> /// "SymGEigsShiftSolver (Buckling mode)" for more details. /// - The Cayley mode transforms the problem into \f$(A-\sigma B)^{-1}(A+\sigma B)x=\nu x\f$, /// where \f$\nu=(\lambda+\sigma)/(\lambda-\sigma)\f$. This mode assumes that \f$B\f$ is positive definite. 
/// See \ref SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Cayley> /// "SymGEigsShiftSolver (Cayley mode)" for more details. // Empty class template template <typename OpType, typename BOpType, GEigsMode Mode> class SymGEigsShiftSolver {}; /// /// \ingroup GEigenSolver /// /// This class implements the generalized eigen solver for real symmetric /// matrices using the shift-and-invert spectral transformation. The original problem is /// to solve \f$Ax=\lambda Bx\f$, where \f$A\f$ is symmetric and \f$B\f$ is positive definite. /// The transformed problem is \f$(A-\sigma B)^{-1}Bx=\nu x\f$, where /// \f$\nu=1/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift. /// /// This solver requires two matrix operation objects: one to compute \f$y=(A-\sigma B)^{-1}x\f$ /// for any vector \f$v\f$, and one for the matrix multiplication \f$Bv\f$. /// /// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation object /// can be created using the SymShiftInvert class, and the second one can be created /// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their /// own operation classes, then they should implement all the public member functions as /// in those built-in classes. /// /// \tparam OpType The type of the first operation object. Users could either /// use the wrapper class SymShiftInvert, or define their own that implements /// the type definition `Scalar` and all the public member functions as in SymShiftInvert. /// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either /// use the wrapper classes such as DenseSymMatProd and /// SparseSymMatProd, or define their own that implements all the /// public member functions as in DenseSymMatProd. /// \tparam Mode Mode of the generalized eigen solver. In this solver /// it is Spectra::GEigsMode::ShiftInvert. /// /// Below is an example that demonstrates the usage of this class. 
/// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Eigen/SparseCore> /// #include <Spectra/SymGEigsShiftSolver.h> /// #include <Spectra/MatOp/SymShiftInvert.h> /// #include <Spectra/MatOp/SparseSymMatProd.h> /// #include <iostream> /// /// using namespace Spectra; /// /// int main() /// { /// // We are going to solve the generalized eigenvalue problem /// // A * x = lambda * B * x, /// // where A is symmetric and B is positive definite /// const int n = 100; /// /// // Define the A matrix /// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n); /// Eigen::MatrixXd A = M + M.transpose(); /// /// // Define the B matrix, a tridiagonal matrix with 2 on the diagonal /// // and 1 on the subdiagonals /// Eigen::SparseMatrix<double> B(n, n); /// B.reserve(Eigen::VectorXi::Constant(n, 3)); /// for (int i = 0; i < n; i++) /// { /// B.insert(i, i) = 2.0; /// if (i > 0) /// B.insert(i - 1, i) = 1.0; /// if (i < n - 1) /// B.insert(i + 1, i) = 1.0; /// } /// /// // Construct matrix operation objects using the wrapper classes /// // A is dense, B is sparse /// using OpType = SymShiftInvert<double, Eigen::Dense, Eigen::Sparse>; /// using BOpType = SparseSymMatProd<double>; /// OpType op(A, B); /// BOpType Bop(B); /// /// // Construct generalized eigen solver object, seeking three generalized /// // eigenvalues that are closest to zero. 
This is equivalent to specifying /// // a shift sigma = 0.0 combined with the SortRule::LargestMagn selection rule /// SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert> /// geigs(op, Bop, 3, 6, 0.0); /// /// // Initialize and compute /// geigs.init(); /// int nconv = geigs.compute(SortRule::LargestMagn); /// /// // Retrieve results /// Eigen::VectorXd evalues; /// Eigen::MatrixXd evecs; /// if (geigs.info() == CompInfo::Successful) /// { /// evalues = geigs.eigenvalues(); /// evecs = geigs.eigenvectors(); /// } /// /// std::cout << "Number of converged generalized eigenvalues: " << nconv << std::endl; /// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl; /// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl; /// /// return 0; /// } /// \endcode // Partial specialization for mode = GEigsMode::ShiftInvert template <typename OpType, typename BOpType> class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::ShiftInvert> : public SymEigsBase<SymGEigsShiftInvertOp<OpType, BOpType>, BOpType> { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>; using ModeMatOp = SymGEigsShiftInvertOp<OpType, BOpType>; using Base = SymEigsBase<ModeMatOp, BOpType>; using Base::m_nev; using Base::m_ritz_val; const Scalar m_sigma; // Set shift and forward static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma) { op.set_shift(sigma); return std::move(op); } // First transform back the Ritz values, and then sort void sort_ritzpair(SortRule sort_rule) override { // The eigenvalues we get from the iteration is nu = 1 / (lambda - sigma) // So the eigenvalues of the original problem is lambda = 1 / nu + sigma m_ritz_val.head(m_nev).array() = Scalar(1) / m_ritz_val.head(m_nev).array() + m_sigma; Base::sort_ritzpair(sort_rule); } public: /// /// Constructor to create a solver object. 
/// /// \param op The matrix operation object that computes \f$y=(A-\sigma B)^{-1}v\f$ /// for any vector \f$v\f$. Users could either create the object from the /// wrapper class SymShiftInvert, or define their own that implements all /// the public members as in SymShiftInvert. /// \param Bop The \f$B\f$ matrix operation object that implements the matrix-vector /// multiplication \f$Bv\f$. Users could either create the object from the /// wrapper classes such as DenseSymMatProd and SparseSymMatProd, or /// define their own that implements all the public member functions /// as in DenseSymMatProd. \f$B\f$ needs to be positive definite. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev\f$. /// \param sigma The value of the shift. /// SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) : Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv), m_sigma(sigma) {} }; /// /// \ingroup GEigenSolver /// /// This class implements the generalized eigen solver for real symmetric /// matrices in the buckling mode. The original problem is /// to solve \f$Kx=\lambda K_G x\f$, where \f$K\f$ is positive definite and \f$K_G\f$ is symmetric. /// The transformed problem is \f$(K-\sigma K_G)^{-1}Kx=\nu x\f$, where /// \f$\nu=\lambda/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift. /// /// This solver requires two matrix operation objects: one to compute \f$y=(K-\sigma K_G)^{-1}x\f$ /// for any vector \f$v\f$, and one for the matrix multiplication \f$Kv\f$. 
/// /// If \f$K\f$ and \f$K_G\f$ are stored as Eigen matrices, then the first operation object /// can be created using the SymShiftInvert class, and the second one can be created /// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their /// own operation classes, then they should implement all the public member functions as /// in those built-in classes. /// /// \tparam OpType The type of the first operation object. Users could either /// use the wrapper class SymShiftInvert, or define their own that implements /// the type definition `Scalar` and all the public member functions as in SymShiftInvert. /// \tparam BOpType The name of the matrix operation class for \f$K\f$. Users could either /// use the wrapper classes such as DenseSymMatProd and /// SparseSymMatProd, or define their own that implements all the /// public member functions as in DenseSymMatProd. /// \tparam Mode Mode of the generalized eigen solver. In this solver /// it is Spectra::GEigsMode::Buckling. /// /// Below is an example that demonstrates the usage of this class. 
/// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Eigen/SparseCore> /// #include <Spectra/SymGEigsShiftSolver.h> /// #include <Spectra/MatOp/SymShiftInvert.h> /// #include <Spectra/MatOp/SparseSymMatProd.h> /// #include <iostream> /// /// using namespace Spectra; /// /// int main() /// { /// // We are going to solve the generalized eigenvalue problem /// // K * x = lambda * KG * x, /// // where K is positive definite, and KG is symmetric /// const int n = 100; /// /// // Define the K matrix, a tridiagonal matrix with 2 on the diagonal /// // and 1 on the subdiagonals /// Eigen::SparseMatrix<double> K(n, n); /// K.reserve(Eigen::VectorXi::Constant(n, 3)); /// for (int i = 0; i < n; i++) /// { /// K.insert(i, i) = 2.0; /// if (i > 0) /// K.insert(i - 1, i) = 1.0; /// if (i < n - 1) /// K.insert(i + 1, i) = 1.0; /// } /// /// // Define the KG matrix /// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n); /// Eigen::MatrixXd KG = M + M.transpose(); /// /// // Construct matrix operation objects using the wrapper classes /// // K is sparse, KG is dense /// using OpType = SymShiftInvert<double, Eigen::Sparse, Eigen::Dense>; /// using BOpType = SparseSymMatProd<double>; /// OpType op(K, KG); /// BOpType Bop(K); /// /// // Construct generalized eigen solver object, seeking three generalized /// // eigenvalues that are closest to and larger than 1.0. 
This is equivalent to /// // specifying a shift sigma = 1.0 combined with the SortRule::LargestAlge /// // selection rule /// SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling> /// geigs(op, Bop, 3, 6, 1.0); /// /// // Initialize and compute /// geigs.init(); /// int nconv = geigs.compute(SortRule::LargestAlge); /// /// // Retrieve results /// Eigen::VectorXd evalues; /// Eigen::MatrixXd evecs; /// if (geigs.info() == CompInfo::Successful) /// { /// evalues = geigs.eigenvalues(); /// evecs = geigs.eigenvectors(); /// } /// /// std::cout << "Number of converged generalized eigenvalues: " << nconv << std::endl; /// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl; /// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl; /// /// return 0; /// } /// \endcode // Partial specialization for mode = GEigsMode::Buckling template <typename OpType, typename BOpType> class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Buckling> : public SymEigsBase<SymGEigsBucklingOp<OpType, BOpType>, BOpType> { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>; using ModeMatOp = SymGEigsBucklingOp<OpType, BOpType>; using Base = SymEigsBase<ModeMatOp, BOpType>; using Base::m_nev; using Base::m_ritz_val; const Scalar m_sigma; // Set shift and forward static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma) { if (sigma == Scalar(0)) throw std::invalid_argument("SymGEigsShiftSolver: sigma cannot be zero in the buckling mode"); op.set_shift(sigma); return std::move(op); } // First transform back the Ritz values, and then sort void sort_ritzpair(SortRule sort_rule) override { // The eigenvalues we get from the iteration is nu = lambda / (lambda - sigma) // So the eigenvalues of the original problem is lambda = sigma * nu / (nu - 1) m_ritz_val.head(m_nev).array() = m_sigma * m_ritz_val.head(m_nev).array() / (m_ritz_val.head(m_nev).array() - 
Scalar(1)); Base::sort_ritzpair(sort_rule); } public: /// /// Constructor to create a solver object. /// /// \param op The matrix operation object that computes \f$y=(K-\sigma K_G)^{-1}v\f$ /// for any vector \f$v\f$. Users could either create the object from the /// wrapper class SymShiftInvert, or define their own that implements all /// the public members as in SymShiftInvert. /// \param Bop The \f$K\f$ matrix operation object that implements the matrix-vector /// multiplication \f$Kv\f$. Users could either create the object from the /// wrapper classes such as DenseSymMatProd and SparseSymMatProd, or /// define their own that implements all the public member functions /// as in DenseSymMatProd. \f$K\f$ needs to be positive definite. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev\f$. /// \param sigma The value of the shift. /// SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) : Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv), m_sigma(sigma) {} }; /// /// \ingroup GEigenSolver /// /// This class implements the generalized eigen solver for real symmetric /// matrices using the Cayley spectral transformation. The original problem is /// to solve \f$Ax=\lambda Bx\f$, where \f$A\f$ is symmetric and \f$B\f$ is positive definite. /// The transformed problem is \f$(A-\sigma B)^{-1}(A+\sigma B)x=\nu x\f$, where /// \f$\nu=(\lambda+\sigma)/(\lambda-\sigma)\f$, and \f$\sigma\f$ is a user-specified shift. 
/// /// This solver requires two matrix operation objects: one to compute \f$y=(A-\sigma B)^{-1}x\f$ /// for any vector \f$v\f$, and one for the matrix multiplication \f$Bv\f$. /// /// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation object /// can be created using the SymShiftInvert class, and the second one can be created /// using the DenseSymMatProd or SparseSymMatProd classes. If the users need to define their /// own operation classes, then they should implement all the public member functions as /// in those built-in classes. /// /// \tparam OpType The type of the first operation object. Users could either /// use the wrapper class SymShiftInvert, or define their own that implements /// the type definition `Scalar` and all the public member functions as in SymShiftInvert. /// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either /// use the wrapper classes such as DenseSymMatProd and /// SparseSymMatProd, or define their own that implements all the /// public member functions as in DenseSymMatProd. /// \tparam Mode Mode of the generalized eigen solver. In this solver /// it is Spectra::GEigsMode::Cayley. 
// Partial specialization for mode = GEigsMode::Cayley template <typename OpType, typename BOpType> class SymGEigsShiftSolver<OpType, BOpType, GEigsMode::Cayley> : public SymEigsBase<SymGEigsCayleyOp<OpType, BOpType>, BOpType> { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>; using ModeMatOp = SymGEigsCayleyOp<OpType, BOpType>; using Base = SymEigsBase<ModeMatOp, BOpType>; using Base::m_nev; using Base::m_ritz_val; const Scalar m_sigma; // Set shift and forward static ModeMatOp set_shift_and_move(ModeMatOp&& op, const Scalar& sigma) { if (sigma == Scalar(0)) throw std::invalid_argument("SymGEigsShiftSolver: sigma cannot be zero in the Cayley mode"); op.set_shift(sigma); return std::move(op); } // First transform back the Ritz values, and then sort void sort_ritzpair(SortRule sort_rule) override { // The eigenvalues we get from the iteration is nu = (lambda + sigma) / (lambda - sigma) // So the eigenvalues of the original problem is lambda = sigma * (nu + 1) / (nu - 1) m_ritz_val.head(m_nev).array() = m_sigma * (m_ritz_val.head(m_nev).array() + Scalar(1)) / (m_ritz_val.head(m_nev).array() - Scalar(1)); Base::sort_ritzpair(sort_rule); } public: /// /// Constructor to create a solver object. /// /// \param op The matrix operation object that computes \f$y=(A-\sigma B)^{-1}v\f$ /// for any vector \f$v\f$. Users could either create the object from the /// wrapper class SymShiftInvert, or define their own that implements all /// the public members as in SymShiftInvert. /// \param Bop The \f$B\f$ matrix operation object that implements the matrix-vector /// multiplication \f$Bv\f$. Users could either create the object from the /// wrapper classes such as DenseSymMatProd and SparseSymMatProd, or /// define their own that implements all the public member functions /// as in DenseSymMatProd. \f$B\f$ needs to be positive definite. /// \param nev Number of eigenvalues requested. 
This should satisfy \f$1\le nev \le n-1\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev\f$. /// \param sigma The value of the shift. /// SymGEigsShiftSolver(OpType& op, BOpType& Bop, Index nev, Index ncv, const Scalar& sigma) : Base(set_shift_and_move(ModeMatOp(op, Bop), sigma), Bop, nev, ncv), m_sigma(sigma) {} }; } // namespace Spectra #endif // SPECTRA_SYM_GEIGS_SHIFT_SOLVER_H
21,455
45.241379
109
h
abess
abess-master/include/Spectra/SymGEigsSolver.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_GEIGS_SOLVER_H #define SPECTRA_SYM_GEIGS_SOLVER_H #include "SymEigsBase.h" #include "Util/GEigsMode.h" #include "MatOp/internal/SymGEigsCholeskyOp.h" #include "MatOp/internal/SymGEigsRegInvOp.h" namespace Spectra { /// /// \defgroup GEigenSolver Generalized Eigen Solvers /// /// Generalized eigen solvers for different types of problems. /// /// /// \ingroup GEigenSolver /// /// This class implements the generalized eigen solver for real symmetric /// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and /// \f$B\f$ is positive definite. /// /// There are two modes of this solver, specified by the template parameter `Mode`. /// See the pages for the specialized classes for details. /// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky /// decomposition, which is the preferred mode when the decomposition is /// available. (This can be easily done in Eigen using the dense or sparse /// Cholesky solver.) /// See \ref SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky> "SymGEigsSolver (Cholesky mode)" for more details. /// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the /// linear equation solving operation \f$B^{-1}v\f$. This mode should only be /// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or /// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition. /// See \ref SymGEigsSolver<OpType, BOpType, GEigsMode::RegularInverse> "SymGEigsSolver (Regular inverse mode)" for more details. 
// Empty class template template <typename OpType, typename BOpType, GEigsMode Mode> class SymGEigsSolver {}; /// /// \ingroup GEigenSolver /// /// This class implements the generalized eigen solver for real symmetric /// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$ /// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky /// decomposition \f$B=LL'\f$. /// /// This solver requires two matrix operation objects: one for \f$A\f$ that implements /// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower /// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$. /// /// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation /// can be created using the DenseSymMatProd or SparseSymMatProd classes, and /// the second operation can be created using the DenseCholesky or SparseCholesky /// classes. If the users need to define their own operation classes, then they /// should implement all the public member functions as in those built-in classes. /// /// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either /// use the wrapper classes such as DenseSymMatProd and /// SparseSymMatProd, or define their own that implements the type /// definition `Scalar` and all the public member functions as in /// DenseSymMatProd. /// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either /// use the wrapper classes such as DenseCholesky and /// SparseCholesky, or define their own that implements all the /// public member functions as in DenseCholesky. /// \tparam Mode Mode of the generalized eigen solver. In this solver /// it is Spectra::GEigsMode::Cholesky. /// /// Below is an example that demonstrates the usage of this class. 
/// /// \code{.cpp} /// #include <Eigen/Core> /// #include <Eigen/SparseCore> /// #include <Eigen/Eigenvalues> /// #include <Spectra/SymGEigsSolver.h> /// #include <Spectra/MatOp/DenseSymMatProd.h> /// #include <Spectra/MatOp/SparseCholesky.h> /// #include <iostream> /// /// using namespace Spectra; /// /// int main() /// { /// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x /// const int n = 100; /// /// // Define the A matrix /// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n); /// Eigen::MatrixXd A = M + M.transpose(); /// /// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals /// Eigen::SparseMatrix<double> B(n, n); /// B.reserve(Eigen::VectorXi::Constant(n, 3)); /// for (int i = 0; i < n; i++) /// { /// B.insert(i, i) = 2.0; /// if (i > 0) /// B.insert(i - 1, i) = 1.0; /// if (i < n - 1) /// B.insert(i + 1, i) = 1.0; /// } /// /// // Construct matrix operation objects using the wrapper classes /// DenseSymMatProd<double> op(A); /// SparseCholesky<double> Bop(B); /// /// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues /// SymGEigsSolver<DenseSymMatProd<double>, SparseCholesky<double>, GEigsMode::Cholesky> /// geigs(op, Bop, 3, 6); /// /// // Initialize and compute /// geigs.init(); /// int nconv = geigs.compute(SortRule::LargestAlge); /// /// // Retrieve results /// Eigen::VectorXd evalues; /// Eigen::MatrixXd evecs; /// if (geigs.info() == CompInfo::Successful) /// { /// evalues = geigs.eigenvalues(); /// evecs = geigs.eigenvectors(); /// } /// /// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl; /// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl; /// /// // Verify results using the generalized eigen solver in Eigen /// Eigen::MatrixXd Bdense = B; /// Eigen::GeneralizedSelfAdjointEigenSolver<Eigen::MatrixXd> es(A, Bdense); /// /// std::cout << "Generalized eigenvalues:\n" << 
es.eigenvalues().tail(3) << std::endl; /// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl; /// /// return 0; /// } /// \endcode // Partial specialization for mode = GEigsMode::Cholesky template <typename OpType, typename BOpType> class SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky> : public SymEigsBase<SymGEigsCholeskyOp<OpType, BOpType>, IdentityBOp> { private: using Scalar = typename OpType::Scalar; using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using ModeMatOp = SymGEigsCholeskyOp<OpType, BOpType>; using Base = SymEigsBase<ModeMatOp, IdentityBOp>; const BOpType& m_Bop; public: /// /// Constructor to create a solver object. /// /// \param op The \f$A\f$ matrix operation object that implements the matrix-vector /// multiplication operation of \f$A\f$: /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either /// create the object from the wrapper classes such as DenseSymMatProd, or /// define their own that implements all the public members /// as in DenseSymMatProd. /// \param Bop The \f$B\f$ matrix operation object that represents a Cholesky decomposition of \f$B\f$. /// It should implement the lower and upper triangular solving operations: /// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector /// \f$v\f$, where \f$LL'=B\f$. Users could either /// create the object from the wrapper classes such as DenseCholesky, or /// define their own that implements all the public member functions /// as in DenseCholesky. \f$B\f$ needs to be positive definite. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, /// where \f$n\f$ is the size of matrix. /// \param ncv Parameter that controls the convergence speed of the algorithm. 
/// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev\f$. /// SymGEigsSolver(OpType& op, BOpType& Bop, Index nev, Index ncv) : Base(ModeMatOp(op, Bop), IdentityBOp(), nev, ncv), m_Bop(Bop) {} /// \cond Matrix eigenvectors(Index nvec) const override { Matrix res = Base::eigenvectors(nvec); Vector tmp(res.rows()); const Index nconv = res.cols(); for (Index i = 0; i < nconv; i++) { m_Bop.upper_triangular_solve(&res(0, i), tmp.data()); res.col(i).noalias() = tmp; } return res; } Matrix eigenvectors() const override { return SymGEigsSolver<OpType, BOpType, GEigsMode::Cholesky>::eigenvectors(this->m_nev); } /// \endcond }; /// /// \ingroup GEigenSolver /// /// This class implements the generalized eigen solver for real symmetric /// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$ /// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations /// defined below. /// /// This solver requires two matrix operation objects: one for \f$A\f$ that implements /// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the /// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$. /// /// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation /// can be created using the DenseSymMatProd or SparseSymMatProd classes, and /// the second operation can be created using the SparseRegularInverse class. There is no /// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode /// is always preferred. If the users need to define their own operation classes, then they /// should implement all the public member functions as in those built-in classes. /// /// \tparam OpType The name of the matrix operation class for \f$A\f$. 
Users could either /// use the wrapper classes such as DenseSymMatProd and /// SparseSymMatProd, or define their own that implements the type /// definition `Scalar` and all the public member functions as in /// DenseSymMatProd. /// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either /// use the wrapper class SparseRegularInverse, or define their /// own that implements all the public member functions as in /// SparseRegularInverse. /// \tparam Mode Mode of the generalized eigen solver. In this solver /// it is Spectra::GEigsMode::RegularInverse. /// // Partial specialization for mode = GEigsMode::RegularInverse template <typename OpType, typename BOpType> class SymGEigsSolver<OpType, BOpType, GEigsMode::RegularInverse> : public SymEigsBase<SymGEigsRegInvOp<OpType, BOpType>, BOpType> { private: using Index = Eigen::Index; using ModeMatOp = SymGEigsRegInvOp<OpType, BOpType>; using Base = SymEigsBase<ModeMatOp, BOpType>; public: /// /// Constructor to create a solver object. /// /// \param op The \f$A\f$ matrix operation object that implements the matrix-vector /// multiplication operation of \f$A\f$: /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either /// create the object from the wrapper classes such as DenseSymMatProd, or /// define their own that implements all the public members /// as in DenseSymMatProd. /// \param Bop The \f$B\f$ matrix operation object that implements the multiplication operation /// \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$ for any vector \f$v\f$. /// Users could either create the object from the wrapper class SparseRegularInverse, or /// define their own that implements all the public member functions /// as in SparseRegularInverse. \f$B\f$ needs to be positive definite. /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, /// where \f$n\f$ is the size of matrix. 
/// \param ncv Parameter that controls the convergence speed of the algorithm. /// Typically a larger `ncv` means faster convergence, but it may /// also result in greater memory use and more matrix operations /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, /// and is advised to take \f$ncv \ge 2\cdot nev\f$. /// SymGEigsSolver(OpType& op, BOpType& Bop, Index nev, Index ncv) : Base(ModeMatOp(op, Bop), Bop, nev, ncv) {} }; } // namespace Spectra #endif // SPECTRA_SYM_GEIGS_SOLVER_H
13,190
44.329897
131
h
abess
abess-master/include/Spectra/LinAlg/Arnoldi.h
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_ARNOLDI_H #define SPECTRA_ARNOLDI_H #include <Eigen/Core> #include <cmath> // std::sqrt #include <utility> // std::move #include <stdexcept> // std::invalid_argument #include "../MatOp/internal/ArnoldiOp.h" #include "../Util/TypeTraits.h" #include "../Util/SimpleRandom.h" #include "UpperHessenbergQR.h" #include "DoubleShiftQR.h" namespace Spectra { // Arnoldi factorization A * V = V * H + f * e' // A: n x n // V: n x k // H: k x k // f: n x 1 // e: [0, ..., 0, 1] // V and H are allocated of dimension m, so the maximum value of k is m template <typename Scalar, typename ArnoldiOpType> class Arnoldi { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapVec = Eigen::Map<Vector>; using MapConstMat = Eigen::Map<const Matrix>; using MapConstVec = Eigen::Map<const Vector>; protected: // A very small value, but 1.0 / m_near_0 does not overflow // ~= 1e-307 for the "double" type static constexpr Scalar m_near_0 = TypeTraits<Scalar>::min() * Scalar(10); // The machine precision, ~= 1e-16 for the "double" type static constexpr Scalar m_eps = TypeTraits<Scalar>::epsilon(); ArnoldiOpType m_op; // Operators for the Arnoldi factorization const Index m_n; // dimension of A const Index m_m; // maximum dimension of subspace V Index m_k; // current dimension of subspace V Matrix m_fac_V; // V matrix in the Arnoldi factorization Matrix m_fac_H; // H matrix in the Arnoldi factorization Vector m_fac_f; // residual in the Arnoldi factorization Scalar m_beta; // ||f||, B-norm of f // Given orthonormal basis V (w.r.t. 
B), find a nonzero vector f such that V'Bf = 0 // With rounding errors, we hope V'B(f/||f||) < eps // Assume that f has been properly allocated void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm, Index& op_counter) { using std::sqrt; Vector v(m_n), Vf(V.cols()); for (Index iter = 0; iter < 5; iter++) { // Randomly generate a new vector and orthogonalize it against V SimpleRandom<Scalar> rng(seed + 123 * iter); // The first try forces f to be in the range of A if (iter == 0) { rng.random_vec(v); m_op.perform_op(v.data(), f.data()); op_counter++; } else { rng.random_vec(f); } // f <- f - V * V'Bf, so that f is orthogonal to V in B-norm m_op.trans_product(V, f, Vf); f.noalias() -= V * Vf; // fnorm <- ||f|| fnorm = m_op.norm(f); // Compute V'Bf again m_op.trans_product(V, f, Vf); // Test whether V'B(f/||f||) < eps Scalar ortho_err = Vf.cwiseAbs().maxCoeff(); // If not, iteratively correct the residual int count = 0; while (count < 3 && ortho_err >= m_eps * fnorm) { // f <- f - V * Vf f.noalias() -= V * Vf; // beta <- ||f|| fnorm = m_op.norm(f); m_op.trans_product(V, f, Vf); ortho_err = Vf.cwiseAbs().maxCoeff(); count++; } // If the condition is satisfied, simply return // Otherwise, go to the next iteration and try a new random vector if (ortho_err < m_eps * fnorm) return; } } public: // Copy an ArnoldiOp Arnoldi(const ArnoldiOpType& op, Index m) : m_op(op), m_n(op.rows()), m_m(m), m_k(0) {} // Move an ArnoldiOp Arnoldi(ArnoldiOpType&& op, Index m) : m_op(std::move(op)), m_n(op.rows()), m_m(m), m_k(0) {} // Const-reference to internal structures const Matrix& matrix_V() const { return m_fac_V; } const Matrix& matrix_H() const { return m_fac_H; } const Vector& vector_f() const { return m_fac_f; } Scalar f_norm() const { return m_beta; } Index subspace_dim() const { return m_k; } // Initialize with an operator and an initial vector void init(MapConstVec& v0, Index& op_counter) { m_fac_V.resize(m_n, m_m); m_fac_H.resize(m_m, m_m); 
m_fac_f.resize(m_n); m_fac_H.setZero(); // Verify the initial vector const Scalar v0norm = m_op.norm(v0); if (v0norm < m_near_0) throw std::invalid_argument("initial residual vector cannot be zero"); // Points to the first column of V MapVec v(m_fac_V.data(), m_n); // Force v to be in the range of A, i.e., v = A * v0 m_op.perform_op(v0.data(), v.data()); op_counter++; // Normalize const Scalar vnorm = m_op.norm(v); v /= vnorm; // Compute H and f Vector w(m_n); m_op.perform_op(v.data(), w.data()); op_counter++; m_fac_H(0, 0) = m_op.inner_product(v, w); m_fac_f.noalias() = w - v * m_fac_H(0, 0); // In some cases f is zero in exact arithmetics, but due to rounding errors // it may contain tiny fluctuations. When this happens, we force f to be zero if (m_fac_f.cwiseAbs().maxCoeff() < m_eps) { m_fac_f.setZero(); m_beta = Scalar(0); } else { m_beta = m_op.norm(m_fac_f); } // Indicate that this is a step-1 factorization m_k = 1; } // Arnoldi factorization starting from step-k virtual void factorize_from(Index from_k, Index to_m, Index& op_counter) { using std::sqrt; if (to_m <= from_k) return; if (from_k > m_k) { std::string msg = "Arnoldi: from_k (= " + std::to_string(from_k) + ") is larger than the current subspace dimension (= " + std::to_string(m_k) + ")"; throw std::invalid_argument(msg); } const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); // Pre-allocate vectors Vector Vf(to_m); Vector w(m_n); // Keep the upperleft k x k submatrix of H and set other elements to 0 m_fac_H.rightCols(m_m - from_k).setZero(); m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); for (Index i = from_k; i <= to_m - 1; i++) { bool restart = false; // If beta = 0, then the next V is not full rank // We need to generate a new residual vector that is orthogonal // to the current V, which we call a restart if (m_beta < m_near_0) { MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns expand_basis(V, 2 * i, m_fac_f, m_beta, op_counter); restart = true; } // v <- f / ||f|| 
m_fac_V.col(i).noalias() = m_fac_f / m_beta; // The (i+1)-th column // Note that H[i+1, i] equals to the unrestarted beta m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta; // w <- A * v, v = m_fac_V.col(i) m_op.perform_op(&m_fac_V(0, i), w.data()); op_counter++; const Index i1 = i + 1; // First i+1 columns of V MapConstMat Vs(m_fac_V.data(), m_n, i1); // h = m_fac_H(0:i, i) MapVec h(&m_fac_H(0, i), i1); // h <- V'Bw m_op.trans_product(Vs, w, h); // f <- w - V * h m_fac_f.noalias() = w - Vs * h; m_beta = m_op.norm(m_fac_f); if (m_beta > Scalar(0.717) * m_op.norm(h)) continue; // f/||f|| is going to be the next column of V, so we need to test // whether V'B(f/||f||) ~= 0 m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); // If not, iteratively correct the residual int count = 0; while (count < 5 && ortho_err > m_eps * m_beta) { // There is an edge case: when beta=||f|| is close to zero, f mostly consists // of noises of rounding errors, so the test [ortho_err < eps * beta] is very // likely to fail. In particular, if beta=0, then the test is ensured to fail. // Hence when this happens, we force f to be zero, and then restart in the // next iteration. if (m_beta < beta_thresh) { m_fac_f.setZero(); m_beta = Scalar(0); break; } // f <- f - V * Vf m_fac_f.noalias() -= Vs * Vf.head(i1); // h <- h + Vf h.noalias() += Vf.head(i1); // beta <- ||f|| m_beta = m_op.norm(m_fac_f); m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); count++; } } // Indicate that this is a step-m factorization m_k = to_m; } // Apply H -> Q'HQ, where Q is from a double shift QR decomposition void compress_H(const DoubleShiftQR<Scalar>& decomp) { decomp.matrix_QtHQ(m_fac_H); m_k -= 2; } // Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition void compress_H(const UpperHessenbergQR<Scalar>& decomp) { decomp.matrix_QtHQ(m_fac_H); m_k--; } // Apply V -> VQ and compute the new f. 
// Should be called after compress_H(), since m_k is updated there. // Only need to update the first k+1 columns of V // The first (m - k + i) elements of the i-th column of Q are non-zero, // and the rest are zero void compress_V(const Matrix& Q) { Matrix Vs(m_n, m_k + 1); for (Index i = 0; i < m_k; i++) { const Index nnz = m_m - m_k + i + 1; MapConstVec q(&Q(0, i), nnz); Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q; } Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k); m_fac_V.leftCols(m_k + 1).noalias() = Vs; Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1); m_fac_f.swap(fk); m_beta = m_op.norm(m_fac_f); } }; } // namespace Spectra #endif // SPECTRA_ARNOLDI_H
10,914
33.541139
100
h
abess
abess-master/include/Spectra/LinAlg/BKLDLT.h
// Copyright (C) 2019-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_BK_LDLT_H #define SPECTRA_BK_LDLT_H #include <Eigen/Core> #include <vector> #include <stdexcept> #include "../Util/CompInfo.h" namespace Spectra { // Bunch-Kaufman LDLT decomposition // References: // 1. Bunch, J. R., & Kaufman, L. (1977). Some stable methods for calculating inertia and solving symmetric linear systems. // Mathematics of computation, 31(137), 163-179. // 2. Golub, G. H., & Van Loan, C. F. (2012). Matrix computations (Vol. 3). JHU press. Section 4.4. // 3. Bunch-Parlett diagonal pivoting <http://oz.nthu.edu.tw/~d947207/Chap13_GE3.ppt> // 4. Ashcraft, C., Grimes, R. G., & Lewis, J. G. (1998). Accurate symmetric indefinite linear equation solvers. // SIAM Journal on Matrix Analysis and Applications, 20(2), 513-561. 
template <typename Scalar = double> class BKLDLT { private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapVec = Eigen::Map<Vector>; using MapConstVec = Eigen::Map<const Vector>; using IntVector = Eigen::Matrix<Index, Eigen::Dynamic, 1>; using GenericVector = Eigen::Ref<Vector>; using ConstGenericVector = const Eigen::Ref<const Vector>; Index m_n; Vector m_data; // storage for a lower-triangular matrix std::vector<Scalar*> m_colptr; // pointers to columns IntVector m_perm; // [-2, -1, 3, 1, 4, 5]: 0 <-> 2, 1 <-> 1, 2 <-> 3, 3 <-> 1, 4 <-> 4, 5 <-> 5 std::vector<std::pair<Index, Index>> m_permc; // compressed version of m_perm: [(0, 2), (2, 3), (3, 1)] bool m_computed; CompInfo m_info; // Access to elements // Pointer to the k-th column Scalar* col_pointer(Index k) { return m_colptr[k]; } // A[i, j] -> m_colptr[j][i - j], i >= j Scalar& coeff(Index i, Index j) { return m_colptr[j][i - j]; } const Scalar& coeff(Index i, Index j) const { return m_colptr[j][i - j]; } // A[i, i] -> m_colptr[i][0] Scalar& diag_coeff(Index i) { return m_colptr[i][0]; } const Scalar& diag_coeff(Index i) const { return m_colptr[i][0]; } // Compute column pointers void compute_pointer() { m_colptr.clear(); m_colptr.reserve(m_n); Scalar* head = m_data.data(); for (Index i = 0; i < m_n; i++) { m_colptr.push_back(head); head += (m_n - i); } } // Copy mat - shift * I to m_data template <typename Derived> void copy_data(const Eigen::MatrixBase<Derived>& mat, int uplo, const Scalar& shift) { // If mat is an expression, first evaluate it into a temporary object // This can be achieved by assigning mat to a const Eigen::Ref<const Matrix>& // If mat is a plain object, no temporary object is created const Eigen::Ref<const typename Derived::PlainObject>& src(mat); // Efficient copying for column-major matrices with lower triangular part if ((!Derived::PlainObject::IsRowMajor) && uplo == Eigen::Lower) { for (Index j = 0; j < m_n; j++) { const Scalar* begin 
= &src.coeffRef(j, j); const Index len = m_n - j; std::copy(begin, begin + len, col_pointer(j)); diag_coeff(j) -= shift; } return; } Scalar* dest = m_data.data(); for (Index j = 0; j < m_n; j++) { for (Index i = j; i < m_n; i++, dest++) { if (uplo == Eigen::Lower) *dest = src.coeff(i, j); else *dest = src.coeff(j, i); } diag_coeff(j) -= shift; } } // Compute compressed permutations void compress_permutation() { for (Index i = 0; i < m_n; i++) { // Recover the permutation action const Index perm = (m_perm[i] >= 0) ? (m_perm[i]) : (-m_perm[i] - 1); if (perm != i) m_permc.push_back(std::make_pair(i, perm)); } } // Working on the A[k:end, k:end] submatrix // Exchange k <-> r // Assume r >= k void pivoting_1x1(Index k, Index r) { // No permutation if (k == r) { m_perm[k] = r; return; } // A[k, k] <-> A[r, r] std::swap(diag_coeff(k), diag_coeff(r)); // A[(r+1):end, k] <-> A[(r+1):end, r] std::swap_ranges(&coeff(r + 1, k), col_pointer(k + 1), &coeff(r + 1, r)); // A[(k+1):(r-1), k] <-> A[r, (k+1):(r-1)] Scalar* src = &coeff(k + 1, k); for (Index j = k + 1; j < r; j++, src++) { std::swap(*src, coeff(r, j)); } m_perm[k] = r; } // Working on the A[k:end, k:end] submatrix // Exchange [k+1, k] <-> [r, p] // Assume p >= k, r >= k+1 void pivoting_2x2(Index k, Index r, Index p) { pivoting_1x1(k, p); pivoting_1x1(k + 1, r); // A[k+1, k] <-> A[r, k] std::swap(coeff(k + 1, k), coeff(r, k)); // Use negative signs to indicate a 2x2 block // Also minus one to distinguish a negative zero from a positive zero m_perm[k] = -m_perm[k] - 1; m_perm[k + 1] = -m_perm[k + 1] - 1; } // A[r1, c1:c2] <-> A[r2, c1:c2] // Assume r2 >= r1 > c2 >= c1 void interchange_rows(Index r1, Index r2, Index c1, Index c2) { if (r1 == r2) return; for (Index j = c1; j <= c2; j++) { std::swap(coeff(r1, j), coeff(r2, j)); } } // lambda = |A[r, k]| = max{|A[k+1, k]|, ..., |A[end, k]|} // Largest (in magnitude) off-diagonal element in the first column of the current reduced matrix // r is the row index // Assume k < 
end Scalar find_lambda(Index k, Index& r) { using std::abs; const Scalar* head = col_pointer(k); // => A[k, k] const Scalar* end = col_pointer(k + 1); // Start with r=k+1, lambda=A[k+1, k] r = k + 1; Scalar lambda = abs(head[1]); // Scan remaining elements for (const Scalar* ptr = head + 2; ptr < end; ptr++) { const Scalar abs_elem = abs(*ptr); if (lambda < abs_elem) { lambda = abs_elem; r = k + (ptr - head); } } return lambda; } // sigma = |A[p, r]| = max {|A[k, r]|, ..., |A[end, r]|} \ {A[r, r]} // Largest (in magnitude) off-diagonal element in the r-th column of the current reduced matrix // p is the row index // Assume k < r < end Scalar find_sigma(Index k, Index r, Index& p) { using std::abs; // First search A[r+1, r], ..., A[end, r], which has the same task as find_lambda() // If r == end, we skip this search Scalar sigma = Scalar(-1); if (r < m_n - 1) sigma = find_lambda(r, p); // Then search A[k, r], ..., A[r-1, r], which maps to A[r, k], ..., A[r, r-1] for (Index j = k; j < r; j++) { const Scalar abs_elem = abs(coeff(r, j)); if (sigma < abs_elem) { sigma = abs_elem; p = j; } } return sigma; } // Generate permutations and apply to A // Return true if the resulting pivoting is 1x1, and false if 2x2 bool permutate_mat(Index k, const Scalar& alpha) { using std::abs; Index r = k, p = k; const Scalar lambda = find_lambda(k, r); // If lambda=0, no need to interchange if (lambda > Scalar(0)) { const Scalar abs_akk = abs(diag_coeff(k)); // If |A[k, k]| >= alpha * lambda, no need to interchange if (abs_akk < alpha * lambda) { const Scalar sigma = find_sigma(k, r, p); // If sigma * |A[k, k]| >= alpha * lambda^2, no need to interchange if (sigma * abs_akk < alpha * lambda * lambda) { if (abs_akk >= alpha * sigma) { // Permutation on A pivoting_1x1(k, r); // Permutation on L interchange_rows(k, r, 0, k - 1); return true; } else { // There are two versions of permutation here // 1. A[k+1, k] <-> A[r, k] // 2. 
A[k+1, k] <-> A[r, p], where p >= k and r >= k+1 // // Version 1 and 2 are used by Ref[1] and Ref[2], respectively // Version 1 implementation p = k; // Version 2 implementation // [r, p] and [p, r] are symmetric, but we need to make sure // p >= k and r >= k+1, so it is safe to always make r > p // One exception is when min{r,p} == k+1, in which case we make // r = k+1, so that only one permutation needs to be performed /* const Index rp_min = std::min(r, p); const Index rp_max = std::max(r, p); if(rp_min == k + 1) { r = rp_min; p = rp_max; } else { r = rp_max; p = rp_min; } */ // Right now we use Version 1 since it reduces the overhead of interchange // Permutation on A pivoting_2x2(k, r, p); // Permutation on L interchange_rows(k, p, 0, k - 1); interchange_rows(k + 1, r, 0, k - 1); return false; } } } } return true; } // E = [e11, e12] // [e21, e22] // Overwrite E with inv(E) void inverse_inplace_2x2(Scalar& e11, Scalar& e21, Scalar& e22) const { // inv(E) = [d11, d12], d11 = e22/delta, d21 = -e21/delta, d22 = e11/delta // [d21, d22] const Scalar delta = e11 * e22 - e21 * e21; std::swap(e11, e22); e11 /= delta; e22 /= delta; e21 = -e21 / delta; } // Return value is the status, CompInfo::Successful/NumericalIssue CompInfo gaussian_elimination_1x1(Index k) { // D = 1 / A[k, k] const Scalar akk = diag_coeff(k); // Return CompInfo::NumericalIssue if not invertible if (akk == Scalar(0)) return CompInfo::NumericalIssue; diag_coeff(k) = Scalar(1) / akk; // B -= l * l' / A[k, k], B := A[(k+1):end, (k+1):end], l := L[(k+1):end, k] Scalar* lptr = col_pointer(k) + 1; const Index ldim = m_n - k - 1; MapVec l(lptr, ldim); for (Index j = 0; j < ldim; j++) { MapVec(col_pointer(j + k + 1), ldim - j).noalias() -= (lptr[j] / akk) * l.tail(ldim - j); } // l /= A[k, k] l /= akk; return CompInfo::Successful; } // Return value is the status, CompInfo::Successful/NumericalIssue CompInfo gaussian_elimination_2x2(Index k) { // D = inv(E) Scalar& e11 = diag_coeff(k); Scalar& e21 = 
coeff(k + 1, k); Scalar& e22 = diag_coeff(k + 1); // Return CompInfo::NumericalIssue if not invertible if (e11 * e22 - e21 * e21 == Scalar(0)) return CompInfo::NumericalIssue; inverse_inplace_2x2(e11, e21, e22); // X = l * inv(E), l := L[(k+2):end, k:(k+1)] Scalar* l1ptr = &coeff(k + 2, k); Scalar* l2ptr = &coeff(k + 2, k + 1); const Index ldim = m_n - k - 2; MapVec l1(l1ptr, ldim), l2(l2ptr, ldim); Eigen::Matrix<Scalar, Eigen::Dynamic, 2> X(ldim, 2); X.col(0).noalias() = l1 * e11 + l2 * e21; X.col(1).noalias() = l1 * e21 + l2 * e22; // B -= l * inv(E) * l' = X * l', B = A[(k+2):end, (k+2):end] for (Index j = 0; j < ldim; j++) { MapVec(col_pointer(j + k + 2), ldim - j).noalias() -= (X.col(0).tail(ldim - j) * l1ptr[j] + X.col(1).tail(ldim - j) * l2ptr[j]); } // l = X l1.noalias() = X.col(0); l2.noalias() = X.col(1); return CompInfo::Successful; } public: BKLDLT() : m_n(0), m_computed(false), m_info(CompInfo::NotComputed) {} // Factorize mat - shift * I template <typename Derived> BKLDLT(const Eigen::MatrixBase<Derived>& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) : m_n(mat.rows()), m_computed(false), m_info(CompInfo::NotComputed) { compute(mat, uplo, shift); } template <typename Derived> void compute(const Eigen::MatrixBase<Derived>& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) { using std::abs; m_n = mat.rows(); if (m_n != mat.cols()) throw std::invalid_argument("BKLDLT: matrix must be square"); m_perm.setLinSpaced(m_n, 0, m_n - 1); m_permc.clear(); // Copy data m_data.resize((m_n * (m_n + 1)) / 2); compute_pointer(); copy_data(mat, uplo, shift); const Scalar alpha = (1.0 + std::sqrt(17.0)) / 8.0; Index k = 0; for (k = 0; k < m_n - 1; k++) { // 1. Interchange rows and columns of A, and save the result to m_perm bool is_1x1 = permutate_mat(k, alpha); // 2. Gaussian elimination if (is_1x1) { m_info = gaussian_elimination_1x1(k); } else { m_info = gaussian_elimination_2x2(k); k++; } // 3. 
Check status if (m_info != CompInfo::Successful) break; } // Invert the last 1x1 block if it exists if (k == m_n - 1) { const Scalar akk = diag_coeff(k); if (akk == Scalar(0)) m_info = CompInfo::NumericalIssue; diag_coeff(k) = Scalar(1) / diag_coeff(k); } compress_permutation(); m_computed = true; } // Solve Ax=b void solve_inplace(GenericVector b) const { if (!m_computed) throw std::logic_error("BKLDLT: need to call compute() first"); // PAP' = LDL' // 1. b -> Pb Scalar* x = b.data(); MapVec res(x, m_n); Index npermc = m_permc.size(); for (Index i = 0; i < npermc; i++) { std::swap(x[m_permc[i].first], x[m_permc[i].second]); } // 2. Lz = Pb // If m_perm[end] < 0, then end with m_n - 3, otherwise end with m_n - 2 const Index end = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); for (Index i = 0; i <= end; i++) { const Index b1size = m_n - i - 1; const Index b2size = b1size - 1; if (m_perm[i] >= 0) { MapConstVec l(&coeff(i + 1, i), b1size); res.segment(i + 1, b1size).noalias() -= l * x[i]; } else { MapConstVec l1(&coeff(i + 2, i), b2size); MapConstVec l2(&coeff(i + 2, i + 1), b2size); res.segment(i + 2, b2size).noalias() -= (l1 * x[i] + l2 * x[i + 1]); i++; } } // 3. Dw = z for (Index i = 0; i < m_n; i++) { const Scalar e11 = diag_coeff(i); if (m_perm[i] >= 0) { x[i] *= e11; } else { const Scalar e21 = coeff(i + 1, i), e22 = diag_coeff(i + 1); const Scalar wi = x[i] * e11 + x[i + 1] * e21; x[i + 1] = x[i] * e21 + x[i + 1] * e22; x[i] = wi; i++; } } // 4. L'y = w // If m_perm[end] < 0, then start with m_n - 3, otherwise start with m_n - 2 Index i = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); for (; i >= 0; i--) { const Index ldim = m_n - i - 1; MapConstVec l(&coeff(i + 1, i), ldim); x[i] -= res.segment(i + 1, ldim).dot(l); if (m_perm[i] < 0) { MapConstVec l2(&coeff(i + 1, i - 1), ldim); x[i - 1] -= res.segment(i + 1, ldim).dot(l2); i--; } } // 5. 
x = P'y for (Index i = npermc - 1; i >= 0; i--) { std::swap(x[m_permc[i].first], x[m_permc[i].second]); } } Vector solve(ConstGenericVector& b) const { Vector res = b; solve_inplace(res); return res; } CompInfo info() const { return m_info; } }; } // namespace Spectra #endif // SPECTRA_BK_LDLT_H
17,496
31.522305
140
h
abess
abess-master/include/Spectra/LinAlg/DoubleShiftQR.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_DOUBLE_SHIFT_QR_H #define SPECTRA_DOUBLE_SHIFT_QR_H #include <Eigen/Core> #include <vector> // std::vector #include <algorithm> // std::min, std::fill, std::copy #include <utility> // std::swap #include <cmath> // std::abs, std::sqrt, std::pow #include <stdexcept> // std::invalid_argument, std::logic_error #include "../Util/TypeTraits.h" namespace Spectra { template <typename Scalar = double> class DoubleShiftQR { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Matrix3X = Eigen::Matrix<Scalar, 3, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using IntArray = Eigen::Array<unsigned char, Eigen::Dynamic, 1>; using GenericMatrix = Eigen::Ref<Matrix>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; // A very small value, but 1.0 / m_near_0 does not overflow // ~= 1e-307 for the "double" type static constexpr Scalar m_near_0 = TypeTraits<Scalar>::min() * Scalar(10); // The machine precision, ~= 1e-16 for the "double" type static constexpr Scalar m_eps = TypeTraits<Scalar>::epsilon(); Index m_n; // Dimension of the matrix Matrix m_mat_H; // A copy of the matrix to be factorized Scalar m_shift_s; // Shift constant Scalar m_shift_t; // Shift constant Matrix3X m_ref_u; // Householder reflectors IntArray m_ref_nr; // How many rows does each reflector affects // 3 - A general reflector // 2 - A Givens rotation // 1 - An identity transformation bool m_computed; // Whether matrix has been factorized // Compute sqrt(x1^2 + x2^2 + x3^2) wit high precision static Scalar stable_norm3(Scalar x1, Scalar x2, Scalar x3) { using std::abs; using std::sqrt; x1 = abs(x1); x2 = abs(x2); x3 = abs(x3); // Make x1 >= 
{x2, x3} if (x1 < x2) std::swap(x1, x2); if (x1 < x3) std::swap(x1, x3); // If x1 is too small, return 0 if (x1 < m_near_0) return Scalar(0); const Scalar r2 = x2 / x1, r3 = x3 / x1; // We choose a cutoff such that cutoff^4 < eps // If max(r2, r3) > cutoff, use the standard way; otherwise use Taylor series expansion // to avoid an explicit sqrt() call that may lose precision const Scalar cutoff = Scalar(0.1) * pow(m_eps, Scalar(0.25)); Scalar r = r2 * r2 + r3 * r3; r = (r2 >= cutoff || r3 >= cutoff) ? sqrt(Scalar(1) + r) : (Scalar(1) + r * (Scalar(0.5) - Scalar(0.125) * r)); // sqrt(1 + t) ~= 1 + t/2 - t^2/8 return x1 * r; } // x[i] <- x[i] / r, r = sqrt(x1^2 + x2^2 + x3^2) // Assume |x1| >= {|x2|, |x3|}, x1 != 0 static void stable_scaling(Scalar& x1, Scalar& x2, Scalar& x3) { using std::abs; using std::pow; using std::sqrt; const Scalar x1sign = (x1 > Scalar(0)) ? Scalar(1) : Scalar(-1); x1 = abs(x1); // Use the same method as in stable_norm3() const Scalar r2 = x2 / x1, r3 = x3 / x1; const Scalar cutoff = Scalar(0.1) * pow(m_eps, Scalar(0.25)); Scalar r = r2 * r2 + r3 * r3; // r = 1/sqrt(1 + r2^2 + r3^2) r = (abs(r2) >= cutoff || abs(r3) >= cutoff) ? Scalar(1) / sqrt(Scalar(1) + r) : (Scalar(1) - r * (Scalar(0.5) - Scalar(0.375) * r)); // 1/sqrt(1 + t) ~= 1 - t * (1/2 - (3/8) * t) x1 = x1sign * r; x2 = r2 * r; x3 = r3 * r; } void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind) { using std::abs; Scalar* u = &m_ref_u.coeffRef(0, ind); unsigned char* nr = m_ref_nr.data(); const Scalar x2m = abs(x2), x3m = abs(x3); // If both x2 and x3 are zero, nr is 1, and we early exit if (x2m < m_near_0 && x3m < m_near_0) { nr[ind] = 1; return; } // In general case the reflector affects 3 rows // If x3 is zero, decrease nr by 1 nr[ind] = (x3m < m_near_0) ? 2 : 3; const Scalar x_norm = (x3m < m_near_0) ? 
Eigen::numext::hypot(x1, x2) : stable_norm3(x1, x2, x3); // x1' = x1 - rho * ||x|| // rho = -sign(x1), if x1 == 0, we choose rho = 1 const Scalar rho = (x1 <= Scalar(0)) - (x1 > Scalar(0)); const Scalar x1_new = x1 - rho * x_norm, x1m = abs(x1_new); // Copy x to u u[0] = x1_new; u[1] = x2; u[2] = x3; if (x1m >= x2m && x1m >= x3m) { stable_scaling(u[0], u[1], u[2]); } else if (x2m >= x1m && x2m >= x3m) { stable_scaling(u[1], u[0], u[2]); } else { stable_scaling(u[2], u[0], u[1]); } } void compute_reflector(const Scalar* x, Index ind) { compute_reflector(x[0], x[1], x[2], ind); } // Update the block X = H(il:iu, il:iu) void update_block(Index il, Index iu) { // Block size const Index bsize = iu - il + 1; // If block size == 1, there is no need to apply reflectors if (bsize == 1) { m_ref_nr.coeffRef(il) = 1; return; } const Scalar x00 = m_mat_H.coeff(il, il), x01 = m_mat_H.coeff(il, il + 1), x10 = m_mat_H.coeff(il + 1, il), x11 = m_mat_H.coeff(il + 1, il + 1); // m00 = x00 * (x00 - s) + x01 * x10 + t const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t; // m10 = x10 * (x00 + x11 - s) const Scalar m10 = x10 * (x00 + x11 - m_shift_s); // For block size == 2, do a Givens rotation on M = X * X - s * X + t * I if (bsize == 2) { // This causes nr=2 compute_reflector(m00, m10, 0, il); // Apply the reflector to X apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il); apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il); m_ref_nr.coeffRef(il + 1) = 1; return; } // For block size >=3, use the regular strategy // m20 = x21 * x10 const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il); compute_reflector(m00, m10, m20, il); // Apply the first reflector apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il); apply_XP(m_mat_H.block(0, il, il + (std::min)(bsize, Index(4)), 3), m_n, il); // Calculate the following reflectors // If entering this loop, block size is at least 4. 
for (Index i = 1; i < bsize - 2; i++) { compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i); // Apply the reflector to X apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i); apply_XP(m_mat_H.block(0, il + i, il + (std::min)(bsize, Index(i + 4)), 3), m_n, il + i); } // The last reflector // This causes nr=2 compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1); // Apply the reflector to X apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1); apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1); m_ref_nr.coeffRef(iu) = 1; } // P = I - 2 * u * u' = P' // PX = X - 2 * u * (u'X) void apply_PX(GenericMatrix X, Index stride, Index u_ind) const { const Index nr = m_ref_nr.coeff(u_ind); if (nr == 1) return; const Scalar u0 = m_ref_u.coeff(0, u_ind), u1 = m_ref_u.coeff(1, u_ind); const Scalar u0_2 = Scalar(2) * u0, u1_2 = Scalar(2) * u1; const Index nrow = X.rows(); const Index ncol = X.cols(); Scalar* xptr = X.data(); if (nr == 2 || nrow == 2) { for (Index i = 0; i < ncol; i++, xptr += stride) { const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1]; xptr[0] -= tmp * u0; xptr[1] -= tmp * u1; } } else { const Scalar u2 = m_ref_u.coeff(2, u_ind); const Scalar u2_2 = Scalar(2) * u2; for (Index i = 0; i < ncol; i++, xptr += stride) { const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2]; xptr[0] -= tmp * u0; xptr[1] -= tmp * u1; xptr[2] -= tmp * u2; } } } // x is a pointer to a vector // Px = x - 2 * dot(x, u) * u void apply_PX(Scalar* x, Index u_ind) const { const Index nr = m_ref_nr.coeff(u_ind); if (nr == 1) return; const Scalar u0 = m_ref_u.coeff(0, u_ind), u1 = m_ref_u.coeff(1, u_ind), u2 = m_ref_u.coeff(2, u_ind); // When the reflector only contains two elements, u2 has been set to zero const bool nr_is_2 = (nr == 2); const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 
0 : (x[2] * u2))); x[0] -= dot2 * u0; x[1] -= dot2 * u1; if (!nr_is_2) x[2] -= dot2 * u2; } // XP = X - 2 * (X * u) * u' void apply_XP(GenericMatrix X, Index stride, Index u_ind) const { const Index nr = m_ref_nr.coeff(u_ind); if (nr == 1) return; const Scalar u0 = m_ref_u.coeff(0, u_ind), u1 = m_ref_u.coeff(1, u_ind); const Scalar u0_2 = Scalar(2) * u0, u1_2 = Scalar(2) * u1; const int nrow = X.rows(); const int ncol = X.cols(); Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1) if (nr == 2 || ncol == 2) { // tmp = 2 * u0 * X0 + 2 * u1 * X1 // X0 => X0 - u0 * tmp // X1 => X1 - u1 * tmp for (Index i = 0; i < nrow; i++) { const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i]; X0[i] -= tmp * u0; X1[i] -= tmp * u1; } } else { Scalar* X2 = X1 + stride; // X2 => X.col(2) const Scalar u2 = m_ref_u.coeff(2, u_ind); const Scalar u2_2 = Scalar(2) * u2; for (Index i = 0; i < nrow; i++) { const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i]; X0[i] -= tmp * u0; X1[i] -= tmp * u1; X2[i] -= tmp * u2; } } } public: DoubleShiftQR(Index size) : m_n(size), m_computed(false) {} DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) : m_n(mat.rows()), m_mat_H(m_n, m_n), m_shift_s(s), m_shift_t(t), m_ref_u(3, m_n), m_ref_nr(m_n), m_computed(false) { compute(mat, s, t); } void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) { using std::abs; m_n = mat.rows(); if (m_n != mat.cols()) throw std::invalid_argument("DoubleShiftQR: matrix must be square"); m_mat_H.resize(m_n, m_n); m_shift_s = s; m_shift_t = t; m_ref_u.resize(3, m_n); m_ref_nr.resize(m_n); // Make a copy of mat m_mat_H.noalias() = mat; // Obtain the indices of zero elements in the subdiagonal, // so that H can be divided into several blocks const Scalar eps_abs = m_near_0 * (m_n / m_eps); constexpr Scalar eps_rel = m_eps; std::vector<int> zero_ind; zero_ind.reserve(m_n - 1); zero_ind.push_back(0); Scalar* Hii = m_mat_H.data(); for (Index i = 0; i < m_n - 1; i++, 
Hii += (m_n + 1)) { // Hii[0] => m_mat_H(i, i) // Hii[1] => m_mat_H(i + 1, i) // Hii[m_n + 1] => m_mat_H(i + 1, i + 1) const Scalar h = abs(Hii[1]); // Deflate small sub-diagonal elements const Scalar diag = abs(Hii[0]) + abs(Hii[m_n + 1]); if (h <= eps_abs || h <= eps_rel * diag) { Hii[1] = 0; zero_ind.push_back(i + 1); } // Make sure m_mat_H is upper Hessenberg // Zero the elements below m_mat_H(i + 1, i) std::fill(Hii + 2, Hii + m_n - i, Scalar(0)); } zero_ind.push_back(m_n); const Index len = zero_ind.size() - 1; for (Index i = 0; i < len; i++) { const Index start = zero_ind[i]; const Index end = zero_ind[i + 1] - 1; // Compute refelctors and update each block update_block(start, end); } // Deflation on the computed result Hii = m_mat_H.data(); for (Index i = 0; i < m_n - 1; i++, Hii += (m_n + 1)) { const Scalar h = abs(Hii[1]); const Scalar diag = abs(Hii[0]) + abs(Hii[m_n + 1]); if (h <= eps_abs || h <= eps_rel * diag) Hii[1] = 0; } m_computed = true; } void matrix_QtHQ(Matrix& dest) const { if (!m_computed) throw std::logic_error("DoubleShiftQR: need to call compute() first"); dest.noalias() = m_mat_H; } // Q = P0 * P1 * ... // Q'y = P_{n-2} * ... * P1 * P0 * y void apply_QtY(Vector& y) const { if (!m_computed) throw std::logic_error("DoubleShiftQR: need to call compute() first"); Scalar* y_ptr = y.data(); const Index n1 = m_n - 1; for (Index i = 0; i < n1; i++, y_ptr++) { apply_PX(y_ptr, i); } } // Q = P0 * P1 * ... // YQ = Y * P0 * P1 * ... void apply_YQ(GenericMatrix Y) const { if (!m_computed) throw std::logic_error("DoubleShiftQR: need to call compute() first"); const Index nrow = Y.rows(); const Index n2 = m_n - 2; for (Index i = 0; i < n2; i++) { apply_XP(Y.block(0, i, nrow, 3), nrow, i); } apply_XP(Y.block(0, n2, nrow, 2), nrow, n2); } }; } // namespace Spectra #endif // SPECTRA_DOUBLE_SHIFT_QR_H
14,768
32.489796
111
h
abess
abess-master/include/Spectra/LinAlg/Lanczos.h
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_LANCZOS_H #define SPECTRA_LANCZOS_H #include <Eigen/Core> #include <cmath> // std::sqrt #include <utility> // std::forward #include <stdexcept> // std::invalid_argument #include "Arnoldi.h" namespace Spectra { // Lanczos factorization A * V = V * H + f * e' // A: n x n // V: n x k // H: k x k // f: n x 1 // e: [0, ..., 0, 1] // V and H are allocated of dimension m, so the maximum value of k is m template <typename Scalar, typename ArnoldiOpType> class Lanczos : public Arnoldi<Scalar, ArnoldiOpType> { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapMat = Eigen::Map<Matrix>; using MapVec = Eigen::Map<Vector>; using MapConstMat = Eigen::Map<const Matrix>; using Arnoldi<Scalar, ArnoldiOpType>::m_op; using Arnoldi<Scalar, ArnoldiOpType>::m_n; using Arnoldi<Scalar, ArnoldiOpType>::m_m; using Arnoldi<Scalar, ArnoldiOpType>::m_k; using Arnoldi<Scalar, ArnoldiOpType>::m_fac_V; using Arnoldi<Scalar, ArnoldiOpType>::m_fac_H; using Arnoldi<Scalar, ArnoldiOpType>::m_fac_f; using Arnoldi<Scalar, ArnoldiOpType>::m_beta; using Arnoldi<Scalar, ArnoldiOpType>::m_near_0; using Arnoldi<Scalar, ArnoldiOpType>::m_eps; public: // Forward parameter `op` to the constructor of Arnoldi template <typename T> Lanczos(T&& op, Index m) : Arnoldi<Scalar, ArnoldiOpType>(std::forward<T>(op), m) {} // Lanczos factorization starting from step-k void factorize_from(Index from_k, Index to_m, Index& op_counter) override { using std::sqrt; if (to_m <= from_k) return; if (from_k > m_k) { std::string msg = "Lanczos: from_k (= " + std::to_string(from_k) + ") is larger than the current subspace dimension (= " 
+ std::to_string(m_k) + ")"; throw std::invalid_argument(msg); } const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); // Pre-allocate vectors Vector Vf(to_m); Vector w(m_n); // Keep the upperleft k x k submatrix of H and set other elements to 0 m_fac_H.rightCols(m_m - from_k).setZero(); m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); for (Index i = from_k; i <= to_m - 1; i++) { bool restart = false; // If beta = 0, then the next V is not full rank // We need to generate a new residual vector that is orthogonal // to the current V, which we call a restart if (m_beta < m_near_0) { MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns this->expand_basis(V, 2 * i, m_fac_f, m_beta, op_counter); restart = true; } // v <- f / ||f|| MapVec v(&m_fac_V(0, i), m_n); // The (i+1)-th column v.noalias() = m_fac_f / m_beta; // Note that H[i+1, i] equals to the unrestarted beta m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta; m_fac_H(i - 1, i) = m_fac_H(i, i - 1); // Due to symmetry // w <- A * v m_op.perform_op(v.data(), w.data()); op_counter++; // f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1} // If restarting, we know that H[i+1, i] = 0 // First do w <- w - H[i+1, i] * V{i}, see the discussions in Section 2.3 of // Cullum and Willoughby (2002). Lanczos Algorithms for Large Symmetric Eigenvalue Computations: Vol. 
1 if (!restart) w.noalias() -= m_fac_H(i, i - 1) * m_fac_V.col(i - 1); // H[i+1, i+1] = <v, w> = v'Bw m_fac_H(i, i) = m_op.inner_product(v, w); // f <- w - H[i+1, i+1] * V{i+1} m_fac_f.noalias() = w - m_fac_H(i, i) * v; m_beta = m_op.norm(m_fac_f); // f/||f|| is going to be the next column of V, so we need to test // whether V'B(f/||f||) ~= 0 const Index i1 = i + 1; MapMat Vs(m_fac_V.data(), m_n, i1); // The first (i+1) columns m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); // If not, iteratively correct the residual int count = 0; while (count < 5 && ortho_err > m_eps * m_beta) { // There is an edge case: when beta=||f|| is close to zero, f mostly consists // of noises of rounding errors, so the test [ortho_err < eps * beta] is very // likely to fail. In particular, if beta=0, then the test is ensured to fail. // Hence when this happens, we force f to be zero, and then restart in the // next iteration. if (m_beta < beta_thresh) { m_fac_f.setZero(); m_beta = Scalar(0); break; } // f <- f - V * Vf m_fac_f.noalias() -= Vs * Vf.head(i1); // h <- h + Vf m_fac_H(i - 1, i) += Vf[i - 1]; m_fac_H(i, i - 1) = m_fac_H(i - 1, i); m_fac_H(i, i) += Vf[i]; // beta <- ||f|| m_beta = m_op.norm(m_fac_f); m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); count++; } } // Indicate that this is a step-m factorization m_k = to_m; } // Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition // Function overloading here, not overriding void compress_H(const TridiagQR<Scalar>& decomp) { decomp.matrix_QtHQ(m_fac_H); m_k--; } }; } // namespace Spectra #endif // SPECTRA_LANCZOS_H
6,282
35.52907
115
h
abess
abess-master/include/Spectra/LinAlg/Orthogonalization.h
// Copyright (C) 2020 Netherlands eScience Center <f.zapata@esciencecenter.nl> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_ORTHOGONALIZATION_H #define SPECTRA_ORTHOGONALIZATION_H #include <Eigen/Core> #include <Eigen/QR> namespace Spectra { /// Check if the number of columns to skip is /// larger than 0 but smaller than the total number /// of columns of the matrix /// \param in_output Matrix to be orthogonalized /// \param left_cols_to_skip Number of left columns to be left untouched template <typename Matrix> void assert_left_cols_to_skip(Matrix& in_output, Eigen::Index left_cols_to_skip) { assert(in_output.cols() > left_cols_to_skip && "left_cols_to_skip is larger than columns of matrix"); assert(left_cols_to_skip >= 0 && "left_cols_to_skip is negative"); } /// If the the number of columns to skip is null, /// normalize the first column and set left_cols_to_skip=1 /// \param in_output Matrix to be orthogonalized /// \param left_cols_to_skip Number of left columns to be left untouched /// \return Actual number of left columns to skip template <typename Matrix> Eigen::Index treat_first_col(Matrix& in_output, Eigen::Index left_cols_to_skip) { if (left_cols_to_skip == 0) { in_output.col(0).normalize(); left_cols_to_skip = 1; } return left_cols_to_skip; } /// Orthogonalize the in_output matrix using a QR decomposition /// \param in_output Matrix to be orthogonalized template <typename Matrix> void QR_orthogonalisation(Matrix& in_output) { using InternalMatrix = Eigen::Matrix<typename Matrix::Scalar, Eigen::Dynamic, Eigen::Dynamic>; Eigen::Index nrows = in_output.rows(); Eigen::Index ncols = in_output.cols(); ncols = (std::min)(nrows, ncols); InternalMatrix I = InternalMatrix::Identity(nrows, ncols); Eigen::HouseholderQR<Matrix> qr(in_output); in_output.leftCols(ncols).noalias() = 
qr.householderQ() * I; } /// Orthogonalize the in_output matrix using a modified Gram Schmidt process /// \param in_output matrix to be orthogonalized /// \param left_cols_to_skip Number of left columns to be left untouched template <typename Matrix> void MGS_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0) { assert_left_cols_to_skip(in_output, left_cols_to_skip); left_cols_to_skip = treat_first_col(in_output, left_cols_to_skip); for (Eigen::Index k = left_cols_to_skip; k < in_output.cols(); ++k) { for (Eigen::Index j = 0; j < k; j++) { in_output.col(k) -= in_output.col(j).dot(in_output.col(k)) * in_output.col(j); } in_output.col(k).normalize(); } } /// Orthogonalize the in_output matrix using a Gram Schmidt process /// \param in_output matrix to be orthogonalized /// \param left_cols_to_skip Number of left columns to be left untouched template <typename Matrix> void GS_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0) { assert_left_cols_to_skip(in_output, left_cols_to_skip); left_cols_to_skip = treat_first_col(in_output, left_cols_to_skip); for (Eigen::Index j = left_cols_to_skip; j < in_output.cols(); ++j) { in_output.col(j) -= in_output.leftCols(j) * (in_output.leftCols(j).transpose() * in_output.col(j)); in_output.col(j).normalize(); } } /// Orthogonalize the subspace spanned by right columns of in_output /// against the subspace spanned by left columns /// It assumes that the left columns are already orthogonal and normalized, /// and it does not orthogonalize the left columns against each other /// \param in_output Matrix to be orthogonalized /// \param left_cols_to_skip Number of left columns to be left untouched template <typename Matrix> void subspace_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip) { assert_left_cols_to_skip(in_output, left_cols_to_skip); if (left_cols_to_skip == 0) { return; } Eigen::Index right_cols_to_ortho = in_output.cols() - left_cols_to_skip; 
in_output.rightCols(right_cols_to_ortho) -= in_output.leftCols(left_cols_to_skip) * (in_output.leftCols(left_cols_to_skip).transpose() * in_output.rightCols(right_cols_to_ortho)); } /// Orthogonalize the in_output matrix using a Jens process /// The subspace spanned by right columns are first orthogonalized /// agains the left columns, and then a QR decomposition is applied on the right columns /// to make them orthogonalized agains each other /// \param in_output Matrix to be orthogonalized /// \param left_cols_to_skip Number of left columns to be left untouched template <typename Matrix> void JensWehner_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0) { assert_left_cols_to_skip(in_output, left_cols_to_skip); Eigen::Index right_cols_to_ortho = in_output.cols() - left_cols_to_skip; subspace_orthogonalisation(in_output, left_cols_to_skip); Eigen::Ref<Matrix> right_cols = in_output.rightCols(right_cols_to_ortho); QR_orthogonalisation(right_cols); } /// Orthogonalize the in_output matrix using a twice-is-enough Jens process /// \param in_output Matrix to be orthogonalized /// \param left_cols_to_skip Number of left columns to be left untouched template <typename Matrix> void twice_is_enough_orthogonalisation(Matrix& in_output, Eigen::Index left_cols_to_skip = 0) { JensWehner_orthogonalisation(in_output, left_cols_to_skip); JensWehner_orthogonalisation(in_output, left_cols_to_skip); } } // namespace Spectra #endif //SPECTRA_ORTHOGONALIZATION_H
5,705
39.183099
107
h
abess
abess-master/include/Spectra/LinAlg/RitzPairs.h
// Copyright (C) 2020 Netherlands eScience Center <n.renauld@esciencecenter.nl> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_RITZ_PAIRS_H #define SPECTRA_RITZ_PAIRS_H #include <Eigen/Core> #include <Eigen/Eigenvalues> #include "../Util/SelectionRule.h" namespace Spectra { template <typename Scalar> class SearchSpace; /// This class handles the creation and manipulation of Ritz eigen pairs /// for iterative eigensolvers such as Davidson, Jacobi-Davidson, etc. template <typename Scalar> class RitzPairs { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>; using BoolArray = Eigen::Array<bool, Eigen::Dynamic, 1>; Vector m_values; // eigenvalues Matrix m_small_vectors; // eigenvectors of the small problem, makes restart cheaper. 
Matrix m_vectors; // Ritz (or harmonic Ritz) eigenvectors Matrix m_residues; // residues of the pairs BoolArray m_root_converged; public: RitzPairs() = default; /// Compute the eigen values/vectors /// /// \param search_space Instance of the class handling the search space /// \return Eigen::ComputationalInfo Whether small eigenvalue problem worked Eigen::ComputationInfo compute_eigen_pairs(const SearchSpace<Scalar>& search_space); /// Returns the size of the ritz eigen pairs /// /// \return Eigen::Index Number of pairs Index size() const { return m_values.size(); } /// Sort the eigen pairs according to the selection rule /// /// \param selection Sorting rule void sort(SortRule selection) { std::vector<Index> ind = argsort(selection, m_values); RitzPairs<Scalar> temp = *this; for (Index i = 0; i < size(); i++) { m_values[i] = temp.m_values[ind[i]]; m_vectors.col(i) = temp.m_vectors.col(ind[i]); m_residues.col(i) = temp.m_residues.col(ind[i]); m_small_vectors.col(i) = temp.m_small_vectors.col(ind[i]); } } /// Checks if the algorithm has converged and updates root_converged /// /// \param tol Tolerance for convergence /// \param number_eigenvalue Number of request eigenvalues /// \return bool true if all eigenvalues are converged bool check_convergence(Scalar tol, Index number_eigenvalues) { const Array norms = m_residues.colwise().norm(); bool converged = true; m_root_converged = BoolArray::Zero(norms.size()); for (Index j = 0; j < norms.size(); j++) { m_root_converged[j] = (norms[j] < tol); if (j < number_eigenvalues) { converged &= (norms[j] < tol); } } return converged; } const Matrix& ritz_vectors() const { return m_vectors; } const Vector& ritz_values() const { return m_values; } const Matrix& small_ritz_vectors() const { return m_small_vectors; } const Matrix& residues() const { return m_residues; } const BoolArray& converged_eigenvalues() const { return m_root_converged; } }; } // namespace Spectra #include "SearchSpace.h" namespace Spectra { /// Creates the 
small space matrix and computes its eigen pairs /// Also computes the ritz vectors and residues /// /// \param search_space Instance of the SearchSpace class template <typename Scalar> Eigen::ComputationInfo RitzPairs<Scalar>::compute_eigen_pairs(const SearchSpace<Scalar>& search_space) { const Matrix& basis_vectors = search_space.basis_vectors(); const Matrix& op_basis_prod = search_space.operator_basis_product(); // Form the small eigenvalue Matrix small_matrix = basis_vectors.transpose() * op_basis_prod; // Small eigenvalue problem Eigen::SelfAdjointEigenSolver<Matrix> eigen_solver(small_matrix); m_values = eigen_solver.eigenvalues(); m_small_vectors = eigen_solver.eigenvectors(); // Ritz vectors m_vectors = basis_vectors * m_small_vectors; // Residues m_residues = op_basis_prod * m_small_vectors - m_vectors * m_values.asDiagonal(); return eigen_solver.info(); } } // namespace Spectra #endif // SPECTRA_RITZ_PAIRS_H
4,472
33.145038
102
h
abess
abess-master/include/Spectra/LinAlg/SearchSpace.h
// Copyright (C) 2020 Netherlands eScience Center <n.renauld@esciencecenter.nl> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SEARCH_SPACE_H #define SPECTRA_SEARCH_SPACE_H #include <Eigen/Core> #include "RitzPairs.h" #include "Orthogonalization.h" namespace Spectra { /// This class handles the creation and manipulation of the search space /// for iterative eigensolvers such as Davidson, Jacobi-Davidson, etc. template <typename Scalar> class SearchSpace { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; Matrix m_basis_vectors; Matrix m_op_basis_product; /// Append new vector to the basis /// /// \param new_vect Matrix of new correction vectors void append_new_vectors_to_basis(const Matrix& new_vect) { Index num_update = new_vect.cols(); m_basis_vectors.conservativeResize(Eigen::NoChange, m_basis_vectors.cols() + num_update); m_basis_vectors.rightCols(num_update).noalias() = new_vect; } public: SearchSpace() = default; /// Returns the current size of the search space Index size() const { return m_basis_vectors.cols(); } void initialize_search_space(const Eigen::Ref<const Matrix>& initial_vectors) { m_basis_vectors = initial_vectors; m_op_basis_product = Matrix(initial_vectors.rows(), 0); } /// Updates the matrix formed by the operator applied to the search space /// after the addition of new vectors in the search space. 
Only the product /// of the operator with the new vectors is computed and the result is appended /// to the op_basis_product member variable /// /// \param OpType Operator representing the matrix template <typename OpType> void update_operator_basis_product(OpType& op) { Index nvec = m_basis_vectors.cols() - m_op_basis_product.cols(); m_op_basis_product.conservativeResize(Eigen::NoChange, m_basis_vectors.cols()); m_op_basis_product.rightCols(nvec).noalias() = op * m_basis_vectors.rightCols(nvec); } /// Restart the search space by reducing the basis vector to the last /// Ritz eigenvector /// /// \param ritz_pair Instance of a RitzPair class /// \param size Size of the restart void restart(const RitzPairs<Scalar>& ritz_pairs, Index size) { m_basis_vectors = ritz_pairs.ritz_vectors().leftCols(size); m_op_basis_product = m_op_basis_product * ritz_pairs.small_ritz_vectors().leftCols(size); } /// Append new vectors to the search space and /// orthogonalize the resulting matrix /// /// \param new_vect Matrix of new correction vectors void extend_basis(const Matrix& new_vect) { Index left_cols_to_skip = size(); append_new_vectors_to_basis(new_vect); twice_is_enough_orthogonalisation(m_basis_vectors, left_cols_to_skip); } /// Returns the basis vectors const Matrix& basis_vectors() const { return m_basis_vectors; } /// Returns the operator applied to basis vector const Matrix& operator_basis_product() const { return m_op_basis_product; } }; } // namespace Spectra #endif // SPECTRA_SEARCH_SPACE_H
3,388
33.938144
97
h
abess
abess-master/include/Spectra/LinAlg/TridiagEigen.h
// The code was adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk> // Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_TRIDIAG_EIGEN_H #define SPECTRA_TRIDIAG_EIGEN_H #include <Eigen/Core> #include <Eigen/Jacobi> #include <stdexcept> #include "../Util/TypeTraits.h" namespace Spectra { template <typename Scalar = double> class TridiagEigen { private: using Index = Eigen::Index; // For convenience in adapting the tridiagonal_qr_step() function using RealScalar = Scalar; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using GenericMatrix = Eigen::Ref<Matrix>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; Index m_n; Vector m_main_diag; // Main diagonal elements of the matrix Vector m_sub_diag; // Sub-diagonal elements of the matrix Matrix m_evecs; // To store eigenvectors bool m_computed; // Adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h // Francis implicit QR step. static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n) { using std::abs; // Wilkinson Shift. RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5); RealScalar e = subdiag[end - 1]; // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still // underflow thus leading to inf/NaN values when using the following commented code: // RealScalar e2 = numext::abs2(subdiag[end-1]); // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 
1 : -1) * sqrt(td*td + e2)); // This explain the following, somewhat more complicated, version: RealScalar mu = diag[end]; if (td == RealScalar(0)) mu -= abs(e); else if (e != RealScalar(0)) { const RealScalar e2 = Eigen::numext::abs2(e); const RealScalar h = Eigen::numext::hypot(td, e); if (e2 == RealScalar(0)) mu -= e / ((td + (td > RealScalar(0) ? h : -h)) / e); else mu -= e2 / (td + (td > RealScalar(0) ? h : -h)); } RealScalar x = diag[start] - mu; RealScalar z = subdiag[start]; Eigen::Map<Matrix> q(matrixQ, n, n); // If z ever becomes zero, the Givens rotation will be the identity and // z will stay zero for all future iterations. for (Index k = start; k < end && z != RealScalar(0); ++k) { Eigen::JacobiRotation<RealScalar> rot; rot.makeGivens(x, z); const RealScalar s = rot.s(); const RealScalar c = rot.c(); // do T = G' T G RealScalar sdk = s * diag[k] + c * subdiag[k]; RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1]; diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]); diag[k + 1] = s * sdk + c * dkp1; subdiag[k] = c * sdk - s * dkp1; if (k > start) subdiag[k - 1] = c * subdiag[k - 1] - s * z; // "Chasing the bulge" to return to triangular form. 
x = subdiag[k]; if (k < end - 1) { z = -s * subdiag[k + 1]; subdiag[k + 1] = c * subdiag[k + 1]; } // apply the givens rotation to the unit matrix Q = Q * G if (matrixQ) q.applyOnTheRight(k, k + 1, rot); } } public: TridiagEigen() : m_n(0), m_computed(false) {} TridiagEigen(ConstGenericMatrix& mat) : m_n(mat.rows()), m_computed(false) { compute(mat); } void compute(ConstGenericMatrix& mat) { using std::abs; // A very small value, but 1.0 / near_0 does not overflow // ~= 1e-307 for the "double" type constexpr Scalar near_0 = TypeTraits<Scalar>::min() * Scalar(10); m_n = mat.rows(); if (m_n != mat.cols()) throw std::invalid_argument("TridiagEigen: matrix must be square"); m_main_diag.resize(m_n); m_sub_diag.resize(m_n - 1); m_evecs.resize(m_n, m_n); m_evecs.setIdentity(); // Scale matrix to improve stability const Scalar scale = (std::max)(mat.diagonal().cwiseAbs().maxCoeff(), mat.diagonal(-1).cwiseAbs().maxCoeff()); // If scale=0, mat is a zero matrix, so we can early stop if (scale < near_0) { // m_main_diag contains eigenvalues m_main_diag.setZero(); // m_evecs has been set identity // m_evecs.setIdentity(); m_computed = true; return; } m_main_diag.noalias() = mat.diagonal() / scale; m_sub_diag.noalias() = mat.diagonal(-1) / scale; Scalar* diag = m_main_diag.data(); Scalar* subdiag = m_sub_diag.data(); Index end = m_n - 1; Index start = 0; Index iter = 0; // total number of iterations int info = 0; // 0 for success, 1 for failure const Scalar considerAsZero = TypeTraits<Scalar>::min(); const Scalar precision_inv = Scalar(1) / Eigen::NumTraits<Scalar>::epsilon(); while (end > 0) { for (Index i = start; i < end; i++) { if (abs(subdiag[i]) <= considerAsZero) subdiag[i] = Scalar(0); else { // abs(subdiag[i]) <= epsilon * sqrt(abs(diag[i]) + abs(diag[i+1])) // Scaled to prevent underflows. 
const Scalar scaled_subdiag = precision_inv * subdiag[i]; if (scaled_subdiag * scaled_subdiag <= (abs(diag[i]) + abs(diag[i + 1]))) subdiag[i] = Scalar(0); } } // find the largest unreduced block at the end of the matrix. while (end > 0 && subdiag[end - 1] == Scalar(0)) end--; if (end <= 0) break; // if we spent too many iterations, we give up iter++; if (iter > 30 * m_n) { info = 1; break; } start = end - 1; while (start > 0 && subdiag[start - 1] != Scalar(0)) start--; tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n); } if (info > 0) throw std::runtime_error("TridiagEigen: eigen decomposition failed"); // Scale eigenvalues back m_main_diag *= scale; m_computed = true; } const Vector& eigenvalues() const { if (!m_computed) throw std::logic_error("TridiagEigen: need to call compute() first"); // After calling compute(), main_diag will contain the eigenvalues. return m_main_diag; } const Matrix& eigenvectors() const { if (!m_computed) throw std::logic_error("TridiagEigen: need to call compute() first"); return m_evecs; } }; } // namespace Spectra #endif // SPECTRA_TRIDIAG_EIGEN_H
7,776
32.666667
98
h
abess
abess-master/include/Spectra/LinAlg/UpperHessenbergEigen.h
// The code was adapted from Eigen/src/Eigenvaleus/EigenSolver.h // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk> // Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_UPPER_HESSENBERG_EIGEN_H #define SPECTRA_UPPER_HESSENBERG_EIGEN_H #include <Eigen/Core> #include <stdexcept> #include "UpperHessenbergSchur.h" namespace Spectra { template <typename Scalar = double> class UpperHessenbergEigen { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using GenericMatrix = Eigen::Ref<Matrix>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; using Complex = std::complex<Scalar>; using ComplexMatrix = Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic>; using ComplexVector = Eigen::Matrix<Complex, Eigen::Dynamic, 1>; Index m_n; // Size of the matrix UpperHessenbergSchur<Scalar> m_schur; // Schur decomposition solver Matrix m_matT; // Schur T matrix Matrix m_eivec; // Storing eigenvectors ComplexVector m_eivalues; // Eigenvalues bool m_computed; void doComputeEigenvectors() { using std::abs; const Index size = m_eivec.cols(); const Scalar eps = Eigen::NumTraits<Scalar>::epsilon(); // inefficient! 
this is already computed in RealSchur Scalar norm(0); for (Index j = 0; j < size; ++j) { norm += m_matT.row(j).segment((std::max)(j - 1, Index(0)), size - (std::max)(j - 1, Index(0))).cwiseAbs().sum(); } // Backsubstitute to find vectors of upper triangular form if (norm == Scalar(0)) return; for (Index n = size - 1; n >= 0; n--) { Scalar p = m_eivalues.coeff(n).real(); Scalar q = m_eivalues.coeff(n).imag(); // Scalar vector if (q == Scalar(0)) { Scalar lastr(0), lastw(0); Index l = n; m_matT.coeffRef(n, n) = Scalar(1); for (Index i = n - 1; i >= 0; i--) { Scalar w = m_matT.coeff(i, i) - p; Scalar r = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); if (m_eivalues.coeff(i).imag() < Scalar(0)) { lastw = w; lastr = r; } else { l = i; if (m_eivalues.coeff(i).imag() == Scalar(0)) { if (w != Scalar(0)) m_matT.coeffRef(i, n) = -r / w; else m_matT.coeffRef(i, n) = -r / (eps * norm); } else // Solve real equations { Scalar x = m_matT.coeff(i, i + 1); Scalar y = m_matT.coeff(i + 1, i); Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); Scalar t = (x * lastr - lastw * r) / denom; m_matT.coeffRef(i, n) = t; if (abs(x) > abs(lastw)) m_matT.coeffRef(i + 1, n) = (-r - w * t) / x; else m_matT.coeffRef(i + 1, n) = (-lastr - y * t) / lastw; } // Overflow control Scalar t = abs(m_matT.coeff(i, n)); if ((eps * t) * t > Scalar(1)) m_matT.col(n).tail(size - i) /= t; } } } else if (q < Scalar(0) && n > 0) { // Complex vector Scalar lastra(0), lastsa(0), lastw(0); Index l = n - 1; // Last vector component imaginary so matrix is triangular if (abs(m_matT.coeff(n, n - 1)) > abs(m_matT.coeff(n - 1, n))) { m_matT.coeffRef(n - 1, n - 1) = q / m_matT.coeff(n, n - 1); m_matT.coeffRef(n - 1, n) = -(m_matT.coeff(n, n) - p) / m_matT.coeff(n, n - 1); } else { Complex cc = Complex(Scalar(0), -m_matT.coeff(n - 1, n)) / Complex(m_matT.coeff(n - 1, n - 1) - p, q); m_matT.coeffRef(n - 
1, n - 1) = Eigen::numext::real(cc); m_matT.coeffRef(n - 1, n) = Eigen::numext::imag(cc); } m_matT.coeffRef(n, n - 1) = Scalar(0); m_matT.coeffRef(n, n) = Scalar(1); for (Index i = n - 2; i >= 0; i--) { Scalar ra = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n - 1).segment(l, n - l + 1)); Scalar sa = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); Scalar w = m_matT.coeff(i, i) - p; if (m_eivalues.coeff(i).imag() < Scalar(0)) { lastw = w; lastra = ra; lastsa = sa; } else { l = i; if (m_eivalues.coeff(i).imag() == Scalar(0)) { Complex cc = Complex(-ra, -sa) / Complex(w, q); m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); } else { // Solve complex equations Scalar x = m_matT.coeff(i, i + 1); Scalar y = m_matT.coeff(i + 1, i); Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; if ((vr == Scalar(0)) && (vi == Scalar(0))) vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw)); Complex cc = Complex(x * lastra - lastw * ra + q * sa, x * lastsa - lastw * sa - q * ra) / Complex(vr, vi); m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); if (abs(x) > (abs(lastw) + abs(q))) { m_matT.coeffRef(i + 1, n - 1) = (-ra - w * m_matT.coeff(i, n - 1) + q * m_matT.coeff(i, n)) / x; m_matT.coeffRef(i + 1, n) = (-sa - w * m_matT.coeff(i, n) - q * m_matT.coeff(i, n - 1)) / x; } else { cc = Complex(-lastra - y * m_matT.coeff(i, n - 1), -lastsa - y * m_matT.coeff(i, n)) / Complex(lastw, q); m_matT.coeffRef(i + 1, n - 1) = Eigen::numext::real(cc); m_matT.coeffRef(i + 1, n) = Eigen::numext::imag(cc); } } // Overflow control Scalar t = (std::max)(abs(m_matT.coeff(i, n - 1)), abs(m_matT.coeff(i, n))); if ((eps * t) * t > Scalar(1)) m_matT.block(i, n - 1, size - i, 2) /= t; } } // We handled 
a pair of complex conjugate eigenvalues, so need to skip them both n--; } } // Back transformation to get eigenvectors of original matrix Vector m_tmp(size); for (Index j = size - 1; j >= 0; j--) { m_tmp.noalias() = m_eivec.leftCols(j + 1) * m_matT.col(j).segment(0, j + 1); m_eivec.col(j) = m_tmp; } } public: UpperHessenbergEigen() : m_n(0), m_computed(false) {} UpperHessenbergEigen(ConstGenericMatrix& mat) : m_n(mat.rows()), m_computed(false) { compute(mat); } void compute(ConstGenericMatrix& mat) { using std::abs; using std::sqrt; if (mat.rows() != mat.cols()) throw std::invalid_argument("UpperHessenbergEigen: matrix must be square"); m_n = mat.rows(); // Scale matrix prior to the Schur decomposition const Scalar scale = mat.cwiseAbs().maxCoeff(); // Reduce to real Schur form m_schur.compute(mat / scale); m_schur.swap_T(m_matT); m_schur.swap_U(m_eivec); // Compute eigenvalues from matT m_eivalues.resize(m_n); Index i = 0; while (i < m_n) { // Real eigenvalue if (i == m_n - 1 || m_matT.coeff(i + 1, i) == Scalar(0)) { m_eivalues.coeffRef(i) = m_matT.coeff(i, i); ++i; } else // Complex eigenvalues { Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i + 1, i + 1)); Scalar z; // Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); // without overflow { Scalar t0 = m_matT.coeff(i + 1, i); Scalar t1 = m_matT.coeff(i, i + 1); Scalar maxval = (std::max)(abs(p), (std::max)(abs(t0), abs(t1))); t0 /= maxval; t1 /= maxval; Scalar p0 = p / maxval; z = maxval * sqrt(abs(p0 * p0 + t0 * t1)); } m_eivalues.coeffRef(i) = Complex(m_matT.coeff(i + 1, i + 1) + p, z); m_eivalues.coeffRef(i + 1) = Complex(m_matT.coeff(i + 1, i + 1) + p, -z); i += 2; } } // Compute eigenvectors doComputeEigenvectors(); // Scale eigenvalues back m_eivalues *= scale; m_computed = true; } const ComplexVector& eigenvalues() const { if (!m_computed) throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); return m_eivalues; } ComplexMatrix eigenvectors() { 
using std::abs; if (!m_computed) throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); Index n = m_eivec.cols(); ComplexMatrix matV(n, n); for (Index j = 0; j < n; ++j) { // imaginary part of real eigenvalue is already set to exact zero if (Eigen::numext::imag(m_eivalues.coeff(j)) == Scalar(0) || j + 1 == n) { // we have a real eigen value matV.col(j) = m_eivec.col(j).template cast<Complex>(); matV.col(j).normalize(); } else { // we have a pair of complex eigen values for (Index i = 0; i < n; ++i) { matV.coeffRef(i, j) = Complex(m_eivec.coeff(i, j), m_eivec.coeff(i, j + 1)); matV.coeffRef(i, j + 1) = Complex(m_eivec.coeff(i, j), -m_eivec.coeff(i, j + 1)); } matV.col(j).normalize(); matV.col(j + 1).normalize(); ++j; } } return matV; } }; } // namespace Spectra #endif // SPECTRA_UPPER_HESSENBERG_EIGEN_H
12,450
38.526984
174
h
abess
abess-master/include/Spectra/LinAlg/UpperHessenbergQR.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_UPPER_HESSENBERG_QR_H #define SPECTRA_UPPER_HESSENBERG_QR_H #include <Eigen/Core> #include <cmath> // std::abs, std::sqrt, std::pow #include <algorithm> // std::fill #include <stdexcept> // std::logic_error #include "../Util/TypeTraits.h" namespace Spectra { /// /// \defgroup Internals Internal Classes /// /// Classes for internal use. May be useful to developers. /// /// /// \ingroup Internals /// @{ /// /// /// \defgroup LinearAlgebra Linear Algebra /// /// A number of classes for linear algebra operations. /// /// /// \ingroup LinearAlgebra /// /// Perform the QR decomposition of an upper Hessenberg matrix. /// /// \tparam Scalar The element type of the matrix. /// Currently supported types are `float`, `double` and `long double`. /// template <typename Scalar = double> class UpperHessenbergQR { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using RowVector = Eigen::Matrix<Scalar, 1, Eigen::Dynamic>; using Array = Eigen::Array<Scalar, Eigen::Dynamic, 1>; using GenericMatrix = Eigen::Ref<Matrix>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; Matrix m_mat_R; protected: Index m_n; // Gi = [ cos[i] sin[i]] // [-sin[i] cos[i]] // Q = G1 * G2 * ... 
* G_{n-1} Scalar m_shift; Array m_rot_cos; Array m_rot_sin; bool m_computed; // Given a >= b > 0, compute r = sqrt(a^2 + b^2), c = a / r, and s = b / r with high precision static void stable_scaling(const Scalar& a, const Scalar& b, Scalar& r, Scalar& c, Scalar& s) { using std::sqrt; using std::pow; // Let t = b / a, then 0 < t <= 1 // c = 1 / sqrt(1 + t^2) // s = t * c // r = a * sqrt(1 + t^2) const Scalar t = b / a; // We choose a cutoff such that cutoff^4 < eps // If t > cutoff, use the standard way; otherwise use Taylor series expansion // to avoid an explicit sqrt() call that may lose precision constexpr Scalar eps = TypeTraits<Scalar>::epsilon(); // std::pow() is not constexpr, so we do not declare cutoff to be constexpr // But most compilers should be able to compute cutoff at compile time const Scalar cutoff = Scalar(0.1) * pow(eps, Scalar(0.25)); if (t >= cutoff) { const Scalar denom = sqrt(Scalar(1) + t * t); c = Scalar(1) / denom; s = t * c; r = a * denom; } else { // 1 / sqrt(1 + t^2) ~= 1 - (1/2) * t^2 + (3/8) * t^4 // 1 / sqrt(1 + l^2) ~= 1 / l - (1/2) / l^3 + (3/8) / l^5 // == t - (1/2) * t^3 + (3/8) * t^5, where l = 1 / t // sqrt(1 + t^2) ~= 1 + (1/2) * t^2 - (1/8) * t^4 + (1/16) * t^6 // // c = 1 / sqrt(1 + t^2) ~= 1 - t^2 * (1/2 - (3/8) * t^2) // s = 1 / sqrt(1 + l^2) ~= t * (1 - t^2 * (1/2 - (3/8) * t^2)) // r = a * sqrt(1 + t^2) ~= a + (1/2) * b * t - (1/8) * b * t^3 + (1/16) * b * t^5 // == a + (b/2) * t * (1 - t^2 * (1/4 - 1/8 * t^2)) constexpr Scalar c1 = Scalar(1); constexpr Scalar c2 = Scalar(0.5); constexpr Scalar c4 = Scalar(0.25); constexpr Scalar c8 = Scalar(0.125); constexpr Scalar c38 = Scalar(0.375); const Scalar t2 = t * t; const Scalar tc = t2 * (c2 - c38 * t2); c = c1 - tc; s = t - t * tc; r = a + c2 * b * t * (c1 - t2 * (c4 - c8 * t2)); /* const Scalar t_2 = Scalar(0.5) * t; const Scalar t2_2 = t_2 * t; const Scalar t3_2 = t2_2 * t; const Scalar t4_38 = Scalar(1.5) * t2_2 * t2_2; const Scalar t5_16 = Scalar(0.25) * t3_2 * t2_2; c 
= Scalar(1) - t2_2 + t4_38; s = t - t3_2 + Scalar(6) * t5_16; r = a + b * (t_2 - Scalar(0.25) * t3_2 + t5_16); */ } } // Given x and y, compute 1) r = sqrt(x^2 + y^2), 2) c = x / r, 3) s = -y / r // If both x and y are zero, set c = 1 and s = 0 // We must implement it in a numerically stable way // The implementation below is shown to be more accurate than directly computing // r = std::hypot(x, y); c = x / r; s = -y / r; static void compute_rotation(const Scalar& x, const Scalar& y, Scalar& r, Scalar& c, Scalar& s) { using std::abs; // Only need xsign when x != 0 const Scalar xsign = (x > Scalar(0)) ? Scalar(1) : Scalar(-1); const Scalar xabs = abs(x); if (y == Scalar(0)) { c = (x == Scalar(0)) ? Scalar(1) : xsign; s = Scalar(0); r = xabs; return; } // Now we know y != 0 const Scalar ysign = (y > Scalar(0)) ? Scalar(1) : Scalar(-1); const Scalar yabs = abs(y); if (x == Scalar(0)) { c = Scalar(0); s = -ysign; r = yabs; return; } // Now we know x != 0, y != 0 if (xabs > yabs) { stable_scaling(xabs, yabs, r, c, s); c = xsign * c; s = -ysign * s; } else { stable_scaling(yabs, xabs, r, s, c); c = xsign * c; s = -ysign * s; } } public: /// /// Constructor to preallocate memory. Computation can /// be performed later by calling the compute() method. /// UpperHessenbergQR(Index size) : m_n(size), m_rot_cos(m_n - 1), m_rot_sin(m_n - 1), m_computed(false) {} /// /// Constructor to create an object that performs and stores the /// QR decomposition of an upper Hessenberg matrix `mat`, with an /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix /// `mat`, and \f$s\f$ is the shift. /// /// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g. /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// Only the upper triangular and the subdiagonal elements of /// the matrix are used. 
/// UpperHessenbergQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : m_n(mat.rows()), m_shift(shift), m_rot_cos(m_n - 1), m_rot_sin(m_n - 1), m_computed(false) { compute(mat, shift); } /// /// Virtual destructor. /// virtual ~UpperHessenbergQR(){}; /// /// Compute the QR decomposition of an upper Hessenberg matrix with /// an optional shift. /// /// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g. /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// Only the upper triangular and the subdiagonal elements of /// the matrix are used. /// virtual void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) { m_n = mat.rows(); if (m_n != mat.cols()) throw std::invalid_argument("UpperHessenbergQR: matrix must be square"); m_shift = shift; m_mat_R.resize(m_n, m_n); m_rot_cos.resize(m_n - 1); m_rot_sin.resize(m_n - 1); // Make a copy of mat - s * I m_mat_R.noalias() = mat; m_mat_R.diagonal().array() -= m_shift; Scalar xi, xj, r, c, s; Scalar *Rii, *ptr; const Index n1 = m_n - 1; for (Index i = 0; i < n1; i++) { Rii = &m_mat_R.coeffRef(i, i); // Make sure R is upper Hessenberg // Zero the elements below R[i + 1, i] std::fill(Rii + 2, Rii + m_n - i, Scalar(0)); xi = Rii[0]; // R[i, i] xj = Rii[1]; // R[i + 1, i] compute_rotation(xi, xj, r, c, s); m_rot_cos.coeffRef(i) = c; m_rot_sin.coeffRef(i) = s; // For a complete QR decomposition, // we first obtain the rotation matrix // G = [ cos sin] // [-sin cos] // and then do R[i:(i + 1), i:(n - 1)] = G' * R[i:(i + 1), i:(n - 1)] // Gt << c, -s, s, c; // m_mat_R.block(i, i, 2, m_n - i) = Gt * m_mat_R.block(i, i, 2, m_n - i); Rii[0] = r; // R[i, i] => r Rii[1] = 0; // R[i + 1, i] => 0 ptr = Rii + m_n; // R[i, k], k = i+1, i+2, ..., n-1 for (Index j = i + 1; j < m_n; j++, ptr += m_n) { const Scalar tmp = ptr[0]; ptr[0] = c * tmp - s * ptr[1]; ptr[1] = s * tmp + c * ptr[1]; } // If we do not need to calculate the R matrix, then // only 
the cos and sin sequences are required. // In such case we only update R[i + 1, (i + 1):(n - 1)] // m_mat_R.block(i + 1, i + 1, 1, m_n - i - 1) *= c; // m_mat_R.block(i + 1, i + 1, 1, m_n - i - 1) += s * m_mat_R.block(i, i + 1, 1, m_n - i - 1); } m_computed = true; } /// /// Return the \f$R\f$ matrix in the QR decomposition, which is an /// upper triangular matrix. /// /// \return Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, depending on /// the template parameter `Scalar` defined. /// virtual Matrix matrix_R() const { if (!m_computed) throw std::logic_error("UpperHessenbergQR: need to call compute() first"); return m_mat_R; } /// /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, /// and \f$s\f$ is the shift. The result is an upper Hessenberg matrix. /// /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix<Scalar, ...>`, /// depending on the template parameter `Scalar` defined. /// virtual void matrix_QtHQ(Matrix& dest) const { if (!m_computed) throw std::logic_error("UpperHessenbergQR: need to call compute() first"); // Make a copy of the R matrix dest.resize(m_n, m_n); dest.noalias() = m_mat_R; // Compute the RQ matrix const Index n1 = m_n - 1; for (Index i = 0; i < n1; i++) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); // RQ[, i:(i + 1)] = RQ[, i:(i + 1)] * Gi // Gi = [ cos[i] sin[i]] // [-sin[i] cos[i]] Scalar *Yi, *Yi1; Yi = &dest.coeffRef(0, i); Yi1 = Yi + m_n; // RQ[0, i + 1] const Index i2 = i + 2; for (Index j = 0; j < i2; j++) { const Scalar tmp = Yi[j]; Yi[j] = c * tmp - s * Yi1[j]; Yi1[j] = s * tmp + c * Yi1[j]; } /* Vector dest = RQ.block(0, i, i + 2, 1); dest.block(0, i, i + 2, 1) = c * Yi - s * dest.block(0, i + 1, i + 2, 1); dest.block(0, i + 1, i + 2, 1) = s * Yi + c * dest.block(0, i + 1, i + 2, 1); */ } // Add the shift to the diagonal dest.diagonal().array() += m_shift; } /// /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. 
/// /// \param Y A vector that will be overwritten by the matrix product \f$Qy\f$. /// /// Vector type can be `Eigen::Vector<Scalar, ...>`, depending on /// the template parameter `Scalar` defined. /// // Y -> QY = G1 * G2 * ... * Y void apply_QY(Vector& Y) const { if (!m_computed) throw std::logic_error("UpperHessenbergQR: need to call compute() first"); for (Index i = m_n - 2; i >= 0; i--) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); // Y[i:(i + 1)] = Gi * Y[i:(i + 1)] // Gi = [ cos[i] sin[i]] // [-sin[i] cos[i]] const Scalar tmp = Y[i]; Y[i] = c * tmp + s * Y[i + 1]; Y[i + 1] = -s * tmp + c * Y[i + 1]; } } /// /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. /// /// \param Y A vector that will be overwritten by the matrix product \f$Q'y\f$. /// /// Vector type can be `Eigen::Vector<Scalar, ...>`, depending on /// the template parameter `Scalar` defined. /// // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y void apply_QtY(Vector& Y) const { if (!m_computed) throw std::logic_error("UpperHessenbergQR: need to call compute() first"); const Index n1 = m_n - 1; for (Index i = 0; i < n1; i++) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); // Y[i:(i + 1)] = Gi' * Y[i:(i + 1)] // Gi = [ cos[i] sin[i]] // [-sin[i] cos[i]] const Scalar tmp = Y[i]; Y[i] = c * tmp - s * Y[i + 1]; Y[i + 1] = s * tmp + c * Y[i + 1]; } } /// /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. /// /// \param Y A matrix that will be overwritten by the matrix product \f$QY\f$. /// /// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g. /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// // Y -> QY = G1 * G2 * ... 
* Y void apply_QY(GenericMatrix Y) const { if (!m_computed) throw std::logic_error("UpperHessenbergQR: need to call compute() first"); RowVector Yi(Y.cols()), Yi1(Y.cols()); for (Index i = m_n - 2; i >= 0; i--) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); // Y[i:(i + 1), ] = Gi * Y[i:(i + 1), ] // Gi = [ cos[i] sin[i]] // [-sin[i] cos[i]] Yi.noalias() = Y.row(i); Yi1.noalias() = Y.row(i + 1); Y.row(i) = c * Yi + s * Yi1; Y.row(i + 1) = -s * Yi + c * Yi1; } } /// /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. /// /// \param Y A matrix that will be overwritten by the matrix product \f$Q'Y\f$. /// /// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g. /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y void apply_QtY(GenericMatrix Y) const { if (!m_computed) throw std::logic_error("UpperHessenbergQR: need to call compute() first"); RowVector Yi(Y.cols()), Yi1(Y.cols()); const Index n1 = m_n - 1; for (Index i = 0; i < n1; i++) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); // Y[i:(i + 1), ] = Gi' * Y[i:(i + 1), ] // Gi = [ cos[i] sin[i]] // [-sin[i] cos[i]] Yi.noalias() = Y.row(i); Yi1.noalias() = Y.row(i + 1); Y.row(i) = c * Yi - s * Yi1; Y.row(i + 1) = s * Yi + c * Yi1; } } /// /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. /// /// \param Y A matrix that will be overwritten by the matrix product \f$YQ\f$. /// /// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g. /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// // Y -> YQ = Y * G1 * G2 * ... 
void apply_YQ(GenericMatrix Y) const { if (!m_computed) throw std::logic_error("UpperHessenbergQR: need to call compute() first"); /*Vector Yi(Y.rows()); for(Index i = 0; i < m_n - 1; i++) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi // Gi = [ cos[i] sin[i]] // [-sin[i] cos[i]] Yi.noalias() = Y.col(i); Y.col(i) = c * Yi - s * Y.col(i + 1); Y.col(i + 1) = s * Yi + c * Y.col(i + 1); }*/ Scalar *Y_col_i, *Y_col_i1; const Index n1 = m_n - 1; const Index nrow = Y.rows(); for (Index i = 0; i < n1; i++) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); Y_col_i = &Y.coeffRef(0, i); Y_col_i1 = &Y.coeffRef(0, i + 1); for (Index j = 0; j < nrow; j++) { Scalar tmp = Y_col_i[j]; Y_col_i[j] = c * tmp - s * Y_col_i1[j]; Y_col_i1[j] = s * tmp + c * Y_col_i1[j]; } } } /// /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. /// /// \param Y A matrix that will be overwritten by the matrix product \f$YQ'\f$. /// /// Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g. /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// // Y -> YQ' = Y * G_{n-1}' * ... * G2' * G1' void apply_YQt(GenericMatrix Y) const { if (!m_computed) throw std::logic_error("UpperHessenbergQR: need to call compute() first"); Vector Yi(Y.rows()); for (Index i = m_n - 2; i >= 0; i--) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi' // Gi = [ cos[i] sin[i]] // [-sin[i] cos[i]] Yi.noalias() = Y.col(i); Y.col(i) = c * Yi + s * Y.col(i + 1); Y.col(i + 1) = -s * Yi + c * Y.col(i + 1); } } }; /// /// \ingroup LinearAlgebra /// /// Perform the QR decomposition of a tridiagonal matrix, a special /// case of upper Hessenberg matrices. /// /// \tparam Scalar The element type of the matrix. /// Currently supported types are `float`, `double` and `long double`. 
/// template <typename Scalar = double> class TridiagQR : public UpperHessenbergQR<Scalar> { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; using UpperHessenbergQR<Scalar>::m_n; using UpperHessenbergQR<Scalar>::m_shift; using UpperHessenbergQR<Scalar>::m_rot_cos; using UpperHessenbergQR<Scalar>::m_rot_sin; using UpperHessenbergQR<Scalar>::m_computed; Vector m_T_diag; // diagonal elements of T Vector m_T_subd; // 1st subdiagonal of T Vector m_R_diag; // diagonal elements of R, where T = QR Vector m_R_supd; // 1st superdiagonal of R Vector m_R_supd2; // 2nd superdiagonal of R public: /// /// Constructor to preallocate memory. Computation can /// be performed later by calling the compute() method. /// TridiagQR(Index size) : UpperHessenbergQR<Scalar>(size) {} /// /// Constructor to create an object that performs and stores the /// QR decomposition of a tridiagonal matrix `mat`, with an /// optional shift: \f$T-sI=QR\f$. Here \f$T\f$ stands for the matrix /// `mat`, and \f$s\f$ is the shift. /// /// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g. /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// Only the diagonal and subdiagonal elements of the matrix are used. /// TridiagQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : UpperHessenbergQR<Scalar>(mat.rows()) { this->compute(mat, shift); } /// /// Compute the QR decomposition of a tridiagonal matrix with an /// optional shift. /// /// \param mat Matrix type can be `Eigen::Matrix<Scalar, ...>` (e.g. /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// Only the diagonal and subdiagonal elements of the matrix are used. 
/// void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) override { using std::abs; m_n = mat.rows(); if (m_n != mat.cols()) throw std::invalid_argument("TridiagQR: matrix must be square"); m_shift = shift; m_rot_cos.resize(m_n - 1); m_rot_sin.resize(m_n - 1); // Save the diagonal and subdiagonal elements of T m_T_diag.resize(m_n); m_T_subd.resize(m_n - 1); m_T_diag.noalias() = mat.diagonal(); m_T_subd.noalias() = mat.diagonal(-1); // Deflation of small sub-diagonal elements constexpr Scalar eps = TypeTraits<Scalar>::epsilon(); for (Index i = 0; i < m_n - 1; i++) { if (abs(m_T_subd[i]) <= eps * (abs(m_T_diag[i]) + abs(m_T_diag[i + 1]))) m_T_subd[i] = Scalar(0); } // Apply shift and copy T to R m_R_diag.resize(m_n); m_R_supd.resize(m_n - 1); m_R_supd2.resize(m_n - 2); m_R_diag.array() = m_T_diag.array() - m_shift; m_R_supd.noalias() = m_T_subd; // A number of pointers to avoid repeated address calculation Scalar *c = m_rot_cos.data(), // pointer to the cosine vector *s = m_rot_sin.data(), // pointer to the sine vector r; const Index n1 = m_n - 1, n2 = m_n - 2; for (Index i = 0; i < n1; i++) { // Rdiag[i] == R[i, i] // Tsubd[i] == R[i + 1, i] // r = sqrt(R[i, i]^2 + R[i + 1, i]^2) // c = R[i, i] / r, s = -R[i + 1, i] / r this->compute_rotation(m_R_diag.coeff(i), m_T_subd.coeff(i), r, *c, *s); // For a complete QR decomposition, // we first obtain the rotation matrix // G = [ cos sin] // [-sin cos] // and then do R[i:(i + 1), i:(i + 2)] = G' * R[i:(i + 1), i:(i + 2)] // Update R[i, i] and R[i + 1, i] // The updated value of R[i, i] is known to be r // The updated value of R[i + 1, i] is known to be 0 m_R_diag.coeffRef(i) = r; // Update R[i, i + 1] and R[i + 1, i + 1] // Rsupd[i] == R[i, i + 1] // Rdiag[i + 1] == R[i + 1, i + 1] const Scalar Tii1 = m_R_supd.coeff(i); const Scalar Ti1i1 = m_R_diag.coeff(i + 1); m_R_supd.coeffRef(i) = (*c) * Tii1 - (*s) * Ti1i1; m_R_diag.coeffRef(i + 1) = (*s) * Tii1 + (*c) * Ti1i1; // Update R[i, i + 2] and R[i + 1, i 
+ 2] // Rsupd2[i] == R[i, i + 2] // Rsupd[i + 1] == R[i + 1, i + 2] if (i < n2) { m_R_supd2.coeffRef(i) = -(*s) * m_R_supd.coeff(i + 1); m_R_supd.coeffRef(i + 1) *= (*c); } c++; s++; // If we do not need to calculate the R matrix, then // only the cos and sin sequences are required. // In such case we only update R[i + 1, (i + 1):(i + 2)] // R[i + 1, i + 1] = c * R[i + 1, i + 1] + s * R[i, i + 1]; // R[i + 1, i + 2] *= c; } m_computed = true; } /// /// Return the \f$R\f$ matrix in the QR decomposition, which is an /// upper triangular matrix. /// /// \return Returned matrix type will be `Eigen::Matrix<Scalar, ...>`, depending on /// the template parameter `Scalar` defined. /// Matrix matrix_R() const override { if (!m_computed) throw std::logic_error("TridiagQR: need to call compute() first"); Matrix R = Matrix::Zero(m_n, m_n); R.diagonal().noalias() = m_R_diag; R.diagonal(1).noalias() = m_R_supd; R.diagonal(2).noalias() = m_R_supd2; return R; } /// /// Overwrite `dest` with \f$Q'TQ = RQ + sI\f$, where \f$T\f$ is the input matrix `mat`, /// and \f$s\f$ is the shift. The result is a tridiagonal matrix. /// /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix<Scalar, ...>`, /// depending on the template parameter `Scalar` defined. /// void matrix_QtHQ(Matrix& dest) const override { using std::abs; if (!m_computed) throw std::logic_error("TridiagQR: need to call compute() first"); // In exact arithmetics, Q'TQ = RQ + sI, so we can just apply Q to R and add the shift. // However, some numerical examples show that this algorithm decreases the precision, // so we directly apply Q' and Q to T. 
// Copy the saved diagonal and subdiagonal elements of T to `dest` dest.resize(m_n, m_n); dest.setZero(); dest.diagonal().noalias() = m_T_diag; dest.diagonal(-1).noalias() = m_T_subd; // Ti = [x y 0], Gi = [ cos[i] sin[i] 0], Gi' * Ti * Gi = [x' y' o'] // [y z w] [-sin[i] cos[i] 0] [y' z' w'] // [0 w u] [ 0 0 1] [o' w' u'] // // x' = c2*x - 2*c*s*y + s2*z // y' = c*s*(x-z) + (c2-s2)*y // z' = s2*x + 2*c*s*y + c2*z // o' = -s*w, w' = c*w, u' = u // // In iteration (i+1), (y', o') will be further updated to (y'', o''), // where o'' = 0, y'' = cos[i+1]*y' - sin[i+1]*o' const Index n1 = m_n - 1, n2 = m_n - 2; for (Index i = 0; i < n1; i++) { const Scalar c = m_rot_cos.coeff(i); const Scalar s = m_rot_sin.coeff(i); const Scalar cs = c * s, c2 = c * c, s2 = s * s; const Scalar x = dest.coeff(i, i), y = dest.coeff(i + 1, i), z = dest.coeff(i + 1, i + 1); const Scalar c2x = c2 * x, s2x = s2 * x, c2z = c2 * z, s2z = s2 * z; const Scalar csy2 = Scalar(2) * c * s * y; // Update the diagonal and the lower subdiagonal of dest dest.coeffRef(i, i) = c2x - csy2 + s2z; // x' dest.coeffRef(i + 1, i) = cs * (x - z) + (c2 - s2) * y; // y' dest.coeffRef(i + 1, i + 1) = s2x + csy2 + c2z; // z' if (i < n2) { const Scalar ci1 = m_rot_cos.coeff(i + 1); const Scalar si1 = m_rot_sin.coeff(i + 1); const Scalar o = -s * m_T_subd.coeff(i + 1); // o' dest.coeffRef(i + 2, i + 1) *= c; // w' dest.coeffRef(i + 1, i) = ci1 * dest.coeff(i + 1, i) - si1 * o; // y'' } } // Deflation of small sub-diagonal elements constexpr Scalar eps = TypeTraits<Scalar>::epsilon(); for (Index i = 0; i < n1; i++) { const Scalar diag = abs(dest.coeff(i, i)) + abs(dest.coeff(i + 1, i + 1)); if (abs(dest.coeff(i + 1, i)) <= eps * diag) dest.coeffRef(i + 1, i) = Scalar(0); } // Copy the lower subdiagonal to upper subdiagonal dest.diagonal(1).noalias() = dest.diagonal(-1); } }; /// /// @} /// } // namespace Spectra #endif // SPECTRA_UPPER_HESSENBERG_QR_H
28,001
34.580686
106
h
abess
abess-master/include/Spectra/LinAlg/UpperHessenbergSchur.h
// The code was adapted from Eigen/src/Eigenvaleus/RealSchur.h // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk> // Copyright (C) 2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_UPPER_HESSENBERG_SCHUR_H #define SPECTRA_UPPER_HESSENBERG_SCHUR_H #include <Eigen/Core> #include <Eigen/Jacobi> #include <Eigen/Householder> #include <stdexcept> #include "../Util/TypeTraits.h" namespace Spectra { template <typename Scalar = double> class UpperHessenbergSchur { private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using Vector2s = Eigen::Matrix<Scalar, 2, 1>; using Vector3s = Eigen::Matrix<Scalar, 3, 1>; using GenericMatrix = Eigen::Ref<Matrix>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; Index m_n; // Size of the matrix Matrix m_T; // T matrix, A = UTU' Matrix m_U; // U matrix, A = UTU' bool m_computed; // L1 norm of an upper Hessenberg matrix static Scalar upper_hessenberg_l1_norm(ConstGenericMatrix& x) { const Index n = x.cols(); Scalar norm(0); for (Index j = 0; j < n; j++) norm += x.col(j).segment(0, (std::min)(n, j + 2)).cwiseAbs().sum(); return norm; } // Look for single small sub-diagonal element and returns its index Index find_small_subdiag(Index iu, const Scalar& near_0) const { using std::abs; const Scalar eps = Eigen::NumTraits<Scalar>::epsilon(); Index res = iu; while (res > 0) { Scalar s = abs(m_T.coeff(res - 1, res - 1)) + abs(m_T.coeff(res, res)); s = Eigen::numext::maxi<Scalar>(s * eps, near_0); if (abs(m_T.coeff(res, res - 1)) <= s) break; res--; } return res; } // Update T given that rows iu-1 and iu decouple from the rest void 
split_off_two_rows(Index iu, const Scalar& ex_shift) { using std::sqrt; using std::abs; // The eigenvalues of the 2x2 matrix [a b; c d] are // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc Scalar p = Scalar(0.5) * (m_T.coeff(iu - 1, iu - 1) - m_T.coeff(iu, iu)); Scalar q = p * p + m_T.coeff(iu, iu - 1) * m_T.coeff(iu - 1, iu); // q = tr^2 / 4 - det = discr/4 m_T.coeffRef(iu, iu) += ex_shift; m_T.coeffRef(iu - 1, iu - 1) += ex_shift; if (q >= Scalar(0)) // Two real eigenvalues { Scalar z = sqrt(abs(q)); Eigen::JacobiRotation<Scalar> rot; rot.makeGivens((p >= Scalar(0)) ? (p + z) : (p - z), m_T.coeff(iu, iu - 1)); m_T.rightCols(m_n - iu + 1).applyOnTheLeft(iu - 1, iu, rot.adjoint()); m_T.topRows(iu + 1).applyOnTheRight(iu - 1, iu, rot); m_T.coeffRef(iu, iu - 1) = Scalar(0); m_U.applyOnTheRight(iu - 1, iu, rot); } if (iu > 1) m_T.coeffRef(iu - 1, iu - 2) = Scalar(0); } // Form shift in shift_info, and update ex_shift if an exceptional shift is performed void compute_shift(Index iu, Index iter, Scalar& ex_shift, Vector3s& shift_info) { using std::sqrt; using std::abs; shift_info.coeffRef(0) = m_T.coeff(iu, iu); shift_info.coeffRef(1) = m_T.coeff(iu - 1, iu - 1); shift_info.coeffRef(2) = m_T.coeff(iu, iu - 1) * m_T.coeff(iu - 1, iu); // Wilkinson's original ad hoc shift if (iter == 10) { ex_shift += shift_info.coeff(0); for (Index i = 0; i <= iu; ++i) m_T.coeffRef(i, i) -= shift_info.coeff(0); Scalar s = abs(m_T.coeff(iu, iu - 1)) + abs(m_T.coeff(iu - 1, iu - 2)); shift_info.coeffRef(0) = Scalar(0.75) * s; shift_info.coeffRef(1) = Scalar(0.75) * s; shift_info.coeffRef(2) = Scalar(-0.4375) * s * s; } // MATLAB's new ad hoc shift if (iter == 30) { Scalar s = (shift_info.coeff(1) - shift_info.coeff(0)) / Scalar(2); s = s * s + shift_info.coeff(2); if (s > Scalar(0)) { s = sqrt(s); if (shift_info.coeff(1) < shift_info.coeff(0)) s = -s; s = s + (shift_info.coeff(1) - shift_info.coeff(0)) / Scalar(2); s = shift_info.coeff(0) - 
shift_info.coeff(2) / s; ex_shift += s; for (Index i = 0; i <= iu; ++i) m_T.coeffRef(i, i) -= s; shift_info.setConstant(Scalar(0.964)); } } } // Compute index im at which Francis QR step starts and the first Householder vector void init_francis_qr_step(Index il, Index iu, const Vector3s& shift_info, Index& im, Vector3s& first_householder_vec) const { using std::abs; const Scalar eps = Eigen::NumTraits<Scalar>::epsilon(); Vector3s& v = first_householder_vec; // alias to save typing for (im = iu - 2; im >= il; --im) { const Scalar Tmm = m_T.coeff(im, im); const Scalar r = shift_info.coeff(0) - Tmm; const Scalar s = shift_info.coeff(1) - Tmm; v.coeffRef(0) = (r * s - shift_info.coeff(2)) / m_T.coeff(im + 1, im) + m_T.coeff(im, im + 1); v.coeffRef(1) = m_T.coeff(im + 1, im + 1) - Tmm - r - s; v.coeffRef(2) = m_T.coeff(im + 2, im + 1); if (im == il) break; const Scalar lhs = m_T.coeff(im, im - 1) * (abs(v.coeff(1)) + abs(v.coeff(2))); const Scalar rhs = v.coeff(0) * (abs(m_T.coeff(im - 1, im - 1)) + abs(Tmm) + abs(m_T.coeff(im + 1, im + 1))); if (abs(lhs) < eps * rhs) break; } } // P = I - tau * v * v' = P' // PX = X - tau * v * (v'X), X [3 x c] static void apply_householder_left(const Vector2s& ess, const Scalar& tau, Scalar* x, Index ncol, Index stride) { const Scalar v1 = ess.coeff(0), v2 = ess.coeff(1); const Scalar* const x_end = x + ncol * stride; for (; x < x_end; x += stride) { const Scalar tvx = tau * (x[0] + v1 * x[1] + v2 * x[2]); x[0] -= tvx; x[1] -= tvx * v1; x[2] -= tvx * v2; } } // P = I - tau * v * v' = P' // XP = X - tau * (X * v) * v', X [r x 3] static void apply_householder_right(const Vector2s& ess, const Scalar& tau, Scalar* x, Index nrow, Index stride) { const Scalar v1 = ess.coeff(0), v2 = ess.coeff(1); Scalar* x0 = x; Scalar* x1 = x + stride; Scalar* x2 = x1 + stride; for (Index i = 0; i < nrow; i++) { const Scalar txv = tau * (x0[i] + v1 * x1[i] + v2 * x2[i]); x0[i] -= txv; x1[i] -= txv * v1; x2[i] -= txv * v2; } } // Perform a Francis QR step 
involving rows il:iu and columns im:iu void perform_francis_qr_step(Index il, Index im, Index iu, const Vector3s& first_householder_vec, const Scalar& near_0) { using std::abs; for (Index k = im; k <= iu - 2; ++k) { const bool first_iter = (k == im); Vector3s v; if (first_iter) v = first_householder_vec; else v = m_T.template block<3, 1>(k, k - 1); Scalar tau, beta; Vector2s ess; v.makeHouseholder(ess, tau, beta); if (abs(beta) > near_0) // if v is not zero { if (first_iter && k > il) m_T.coeffRef(k, k - 1) = -m_T.coeff(k, k - 1); else if (!first_iter) m_T.coeffRef(k, k - 1) = beta; // These Householder transformations form the O(n^3) part of the algorithm // m_T.block(k, k, 3, m_n - k).applyHouseholderOnTheLeft(ess, tau, workspace); // m_T.block(0, k, (std::min)(iu, k + 3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace); // m_U.block(0, k, m_n, 3).applyHouseholderOnTheRight(ess, tau, workspace); apply_householder_left(ess, tau, &m_T.coeffRef(k, k), m_n - k, m_n); apply_householder_right(ess, tau, &m_T.coeffRef(0, k), (std::min)(iu, k + 3) + 1, m_n); apply_householder_right(ess, tau, &m_U.coeffRef(0, k), m_n, m_n); } } // The last 2-row block Eigen::JacobiRotation<Scalar> rot; Scalar beta; rot.makeGivens(m_T.coeff(iu - 1, iu - 2), m_T.coeff(iu, iu - 2), &beta); if (abs(beta) > near_0) // if v is not zero { m_T.coeffRef(iu - 1, iu - 2) = beta; m_T.rightCols(m_n - iu + 1).applyOnTheLeft(iu - 1, iu, rot.adjoint()); m_T.topRows(iu + 1).applyOnTheRight(iu - 1, iu, rot); m_U.applyOnTheRight(iu - 1, iu, rot); } // clean up pollution due to round-off errors for (Index i = im + 2; i <= iu; ++i) { m_T.coeffRef(i, i - 2) = Scalar(0); if (i > im + 2) m_T.coeffRef(i, i - 3) = Scalar(0); } } public: UpperHessenbergSchur() : m_n(0), m_computed(false) {} UpperHessenbergSchur(ConstGenericMatrix& mat) : m_n(mat.rows()), m_computed(false) { compute(mat); } void compute(ConstGenericMatrix& mat) { using std::abs; using std::sqrt; if (mat.rows() != mat.cols()) throw 
std::invalid_argument("UpperHessenbergSchur: matrix must be square"); m_n = mat.rows(); m_T.resize(m_n, m_n); m_U.resize(m_n, m_n); constexpr Index max_iter_per_row = 40; const Index max_iter = m_n * max_iter_per_row; m_T.noalias() = mat; m_U.setIdentity(); // The matrix m_T is divided in three parts. // Rows 0,...,il-1 are decoupled from the rest because m_T(il,il-1) is zero. // Rows il,...,iu is the part we are working on (the active window). // Rows iu+1,...,end are already brought in triangular form. Index iu = m_n - 1; Index iter = 0; // iteration count for current eigenvalue Index total_iter = 0; // iteration count for whole matrix Scalar ex_shift(0); // sum of exceptional shifts const Scalar norm = upper_hessenberg_l1_norm(m_T); // sub-diagonal entries smaller than near_0 will be treated as zero. // We use eps^2 to enable more precision in small eigenvalues. const Scalar eps = Eigen::NumTraits<Scalar>::epsilon(); const Scalar near_0 = Eigen::numext::maxi<Scalar>(norm * eps * eps, TypeTraits<Scalar>::min()); if (norm != Scalar(0)) { while (iu >= 0) { Index il = find_small_subdiag(iu, near_0); // Check for convergence if (il == iu) // One root found { m_T.coeffRef(iu, iu) += ex_shift; if (iu > 0) m_T.coeffRef(iu, iu - 1) = Scalar(0); iu--; iter = 0; } else if (il == iu - 1) // Two roots found { split_off_two_rows(iu, ex_shift); iu -= 2; iter = 0; } else // No convergence yet { Vector3s first_householder_vec = Vector3s::Zero(), shift_info; compute_shift(iu, iter, ex_shift, shift_info); iter++; total_iter++; if (total_iter > max_iter) break; Index im; init_francis_qr_step(il, iu, shift_info, im, first_householder_vec); perform_francis_qr_step(il, im, iu, first_householder_vec, near_0); } } } if (total_iter > max_iter) throw std::runtime_error("UpperHessenbergSchur: Schur decomposition failed"); m_computed = true; } const Matrix& matrix_T() const { if (!m_computed) throw std::logic_error("UpperHessenbergSchur: need to call compute() first"); return m_T; } const 
Matrix& matrix_U() const { if (!m_computed) throw std::logic_error("UpperHessenbergSchur: need to call compute() first"); return m_U; } void swap_T(Matrix& other) { m_T.swap(other); } void swap_U(Matrix& other) { m_U.swap(other); } }; } // namespace Spectra #endif // SPECTRA_UPPER_HESSENBERG_SCHUR_H
13,184
35.123288
127
h
abess
abess-master/include/Spectra/MatOp/DenseCholesky.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_DENSE_CHOLESKY_H #define SPECTRA_DENSE_CHOLESKY_H #include <Eigen/Core> #include <Eigen/Cholesky> #include <stdexcept> #include "../Util/CompInfo.h" namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the operations related to Cholesky decomposition on a /// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular /// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver /// in the Cholesky decomposition mode. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which /// triangular part of the matrix is used. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor> class DenseCholesky { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; const Index m_n; Eigen::LLT<Matrix, Uplo> m_decomp; CompInfo m_info; // status of the decomposition public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** matrix object, whose type can be /// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and /// `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). 
/// template <typename Derived> DenseCholesky(const Eigen::MatrixBase<Derived>& mat) : m_n(mat.rows()), m_info(CompInfo::NotComputed) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor), "DenseCholesky: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (m_n != mat.cols()) throw std::invalid_argument("DenseCholesky: matrix must be square"); m_decomp.compute(mat); m_info = (m_decomp.info() == Eigen::Success) ? CompInfo::Successful : CompInfo::NumericalIssue; } /// /// Returns the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Returns the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Returns the status of the computation. /// The full list of enumeration values can be found in \ref Enumerations. /// CompInfo info() const { return m_info; } /// /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(L) * x_in void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_decomp.matrixL().solve(x); } /// /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(L') * x_in void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_decomp.matrixU().solve(x); } }; } // namespace Spectra #endif // SPECTRA_DENSE_CHOLESKY_H
4,101
31.555556
129
h
abess
abess-master/include/Spectra/MatOp/DenseGenComplexShiftSolve.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_DENSE_GEN_COMPLEX_SHIFT_SOLVE_H #define SPECTRA_DENSE_GEN_COMPLEX_SHIFT_SOLVE_H #include <Eigen/Core> #include <Eigen/LU> #include <stdexcept> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the complex shift-solve operation on a general real matrix \f$A\f$, /// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued /// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the /// GenEigsComplexShiftSolver eigen solver. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// template <typename Scalar_, int Flags = Eigen::ColMajor> class DenseGenComplexShiftSolve { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; using Complex = std::complex<Scalar>; using ComplexMatrix = Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic, Flags>; using ComplexVector = Eigen::Matrix<Complex, Eigen::Dynamic, 1>; using ComplexSolver = Eigen::PartialPivLU<ComplexMatrix>; ConstGenericMatrix m_mat; const Index m_n; ComplexSolver m_solver; mutable ComplexVector m_x_cache; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** matrix object, whose type can be /// `Eigen::Matrix<Scalar, ...>` (e.g. 
`Eigen::MatrixXd` and /// `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). /// template <typename Derived> DenseGenComplexShiftSolve(const Eigen::MatrixBase<Derived>& mat) : m_mat(mat), m_n(mat.rows()) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor), "DenseGenComplexShiftSolve: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (mat.rows() != mat.cols()) throw std::invalid_argument("DenseGenComplexShiftSolve: matrix must be square"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Set the complex shift \f$\sigma\f$. /// /// \param sigmar Real part of \f$\sigma\f$. /// \param sigmai Imaginary part of \f$\sigma\f$. /// void set_shift(const Scalar& sigmar, const Scalar& sigmai) { m_solver.compute(m_mat.template cast<Complex>() - Complex(sigmar, sigmai) * ComplexMatrix::Identity(m_n, m_n)); m_x_cache.resize(m_n); m_x_cache.setZero(); } /// /// Perform the complex shift-solve operation /// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = Re( inv(A - sigma * I) * x_in ) void perform_op(const Scalar* x_in, Scalar* y_out) const { m_x_cache.real() = MapConstVec(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_solver.solve(m_x_cache).real(); } }; } // namespace Spectra #endif // SPECTRA_DENSE_GEN_COMPLEX_SHIFT_SOLVE_H
4,034
32.907563
141
h
abess
abess-master/include/Spectra/MatOp/DenseGenMatProd.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_DENSE_GEN_MAT_PROD_H #define SPECTRA_DENSE_GEN_MAT_PROD_H #include <Eigen/Core> namespace Spectra { /// /// \defgroup MatOp Matrix Operations /// /// Define matrix operations on existing matrix objects /// /// /// \ingroup MatOp /// /// This class defines the matrix-vector multiplication operation on a /// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector /// \f$x\f$. It is mainly used in the GenEigsSolver and /// SymEigsSolver eigen solvers. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// template <typename Scalar_, int Flags = Eigen::ColMajor> class DenseGenMatProd { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; ConstGenericMatrix m_mat; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** matrix object, whose type can be /// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and /// `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). 
/// template <typename Derived> DenseGenMatProd(const Eigen::MatrixBase<Derived>& mat) : m_mat(mat) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor), "DenseGenMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_mat.rows(); } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_mat.cols(); } /// /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = A * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_mat.cols()); MapVec y(y_out, m_mat.rows()); y.noalias() = m_mat * x; } /// /// Perform the matrix-matrix multiplication operation \f$y=Ax\f$. /// Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const { return m_mat * mat_in; } /// /// Extract (i,j) element of the underlying matrix. /// Scalar operator()(Index i, Index j) const { return m_mat(i, j); } }; } // namespace Spectra #endif // SPECTRA_DENSE_GEN_MAT_PROD_H
3,352
28.672566
131
h
abess
abess-master/include/Spectra/MatOp/DenseGenRealShiftSolve.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_DENSE_GEN_REAL_SHIFT_SOLVE_H #define SPECTRA_DENSE_GEN_REAL_SHIFT_SOLVE_H #include <Eigen/Core> #include <Eigen/LU> #include <stdexcept> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the shift-solve operation on a general real matrix \f$A\f$, /// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and /// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// template <typename Scalar_, int Flags = Eigen::ColMajor> class DenseGenRealShiftSolve { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; ConstGenericMatrix m_mat; const Index m_n; Eigen::PartialPivLU<Matrix> m_solver; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** matrix object, whose type can be /// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and /// `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). 
/// template <typename Derived> DenseGenRealShiftSolve(const Eigen::MatrixBase<Derived>& mat) : m_mat(mat), m_n(mat.rows()) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor), "DenseGenRealShiftSolve: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (mat.rows() != mat.cols()) throw std::invalid_argument("DenseGenRealShiftSolve: matrix must be square"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Set the real shift \f$\sigma\f$. /// void set_shift(const Scalar& sigma) { m_solver.compute(m_mat - sigma * Matrix::Identity(m_n, m_n)); } /// /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(A - sigma * I) * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_solver.solve(x); } }; } // namespace Spectra #endif // SPECTRA_DENSE_GEN_REAL_SHIFT_SOLVE_H
3,358
30.990476
138
h
abess
abess-master/include/Spectra/MatOp/DenseSymMatProd.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_DENSE_SYM_MAT_PROD_H #define SPECTRA_DENSE_SYM_MAT_PROD_H #include <Eigen/Core> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the matrix-vector multiplication operation on a /// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector /// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which /// triangular part of the matrix is used. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor> class DenseSymMatProd { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; ConstGenericMatrix m_mat; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** matrix object, whose type can be /// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and /// `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). 
/// template <typename Derived> DenseSymMatProd(const Eigen::MatrixBase<Derived>& mat) : m_mat(mat) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor), "DenseSymMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_mat.rows(); } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_mat.cols(); } /// /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = A * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_mat.cols()); MapVec y(y_out, m_mat.rows()); y.noalias() = m_mat.template selfadjointView<Uplo>() * x; } /// /// Perform the matrix-matrix multiplication operation \f$y=Ax\f$. /// Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const { return m_mat.template selfadjointView<Uplo>() * mat_in; } /// /// Extract (i,j) element of the underlying matrix. /// Scalar operator()(Index i, Index j) const { return m_mat(i, j); } }; } // namespace Spectra #endif // SPECTRA_DENSE_SYM_MAT_PROD_H
3,452
30.972222
131
h
abess
abess-master/include/Spectra/MatOp/DenseSymShiftSolve.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_DENSE_SYM_SHIFT_SOLVE_H #define SPECTRA_DENSE_SYM_SHIFT_SOLVE_H #include <Eigen/Core> #include <stdexcept> #include "../LinAlg/BKLDLT.h" #include "../Util/CompInfo.h" namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$, /// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and /// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which /// triangular part of the matrix is used. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor> class DenseSymShiftSolve { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Flags>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using ConstGenericMatrix = const Eigen::Ref<const Matrix>; ConstGenericMatrix m_mat; const Index m_n; BKLDLT<Scalar> m_solver; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** matrix object, whose type can be /// `Eigen::Matrix<Scalar, ...>` (e.g. `Eigen::MatrixXd` and /// `Eigen::MatrixXf`), or its mapped version /// (e.g. `Eigen::Map<Eigen::MatrixXd>`). 
/// template <typename Derived> DenseSymShiftSolve(const Eigen::MatrixBase<Derived>& mat) : m_mat(mat), m_n(mat.rows()) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(Matrix::IsRowMajor), "DenseSymShiftSolve: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (m_n != mat.cols()) throw std::invalid_argument("DenseSymShiftSolve: matrix must be square"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Set the real shift \f$\sigma\f$. /// void set_shift(const Scalar& sigma) { m_solver.compute(m_mat, Uplo, sigma); if (m_solver.info() != CompInfo::Successful) throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift"); } /// /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(A - sigma * I) * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_solver.solve(x); } }; } // namespace Spectra #endif // SPECTRA_DENSE_SYM_SHIFT_SOLVE_H
3,643
31.828829
134
h
abess
abess-master/include/Spectra/MatOp/SparseCholesky.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SPARSE_CHOLESKY_H #define SPECTRA_SPARSE_CHOLESKY_H #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/SparseCholesky> #include <stdexcept> #include "../Util/CompInfo.h" namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the operations related to Cholesky decomposition on a /// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular /// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver /// in the Cholesky decomposition mode. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which /// triangular part of the matrix is used. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// \tparam StorageIndex The type of the indices for the sparse matrix. /// template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor, typename StorageIndex = int> class SparseCholesky { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>; const Index m_n; Eigen::SimplicialLLT<SparseMatrix, Uplo> m_decomp; CompInfo m_info; // status of the decomposition public: /// /// Constructor to create the matrix operation object. 
/// /// \param mat An **Eigen** sparse matrix object, whose type can be /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`. /// template <typename Derived> SparseCholesky(const Eigen::SparseMatrixBase<Derived>& mat) : m_n(mat.rows()) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor), "SparseCholesky: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (mat.rows() != mat.cols()) throw std::invalid_argument("SparseCholesky: matrix must be square"); m_decomp.compute(mat); m_info = (m_decomp.info() == Eigen::Success) ? CompInfo::Successful : CompInfo::NumericalIssue; } /// /// Returns the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Returns the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Returns the status of the computation. /// The full list of enumeration values can be found in \ref Enumerations. /// CompInfo info() const { return m_info; } /// /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(L) * x_in void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_decomp.permutationP() * x; m_decomp.matrixL().solveInPlace(y); } /// /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(L') * x_in void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_decomp.matrixU().solve(x); y = m_decomp.permutationPinv() * y; } }; } // namespace Spectra #endif // SPECTRA_SPARSE_CHOLESKY_H
4,334
32.604651
130
h
abess
abess-master/include/Spectra/MatOp/SparseGenComplexShiftSolve.h
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SPARSE_GEN_COMPLEX_SHIFT_SOLVE_H #define SPECTRA_SPARSE_GEN_COMPLEX_SHIFT_SOLVE_H #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/SparseLU> #include <stdexcept> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the complex shift-solve operation on a sparse real matrix \f$A\f$, /// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued /// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the /// GenEigsComplexShiftSolver eigen solver. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// \tparam StorageIndex The type of the indices for the sparse matrix. /// template <typename Scalar_, int Flags = Eigen::ColMajor, typename StorageIndex = int> class SparseGenComplexShiftSolve { public: /// /// Element type of the matrix. 
/// using Scalar = Scalar_; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>; using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>; using Complex = std::complex<Scalar>; using ComplexVector = Eigen::Matrix<Complex, Eigen::Dynamic, 1>; using SparseComplexMatrix = Eigen::SparseMatrix<Complex, Flags, StorageIndex>; using ComplexSolver = Eigen::SparseLU<SparseComplexMatrix>; ConstGenericSparseMatrix m_mat; const Index m_n; ComplexSolver m_solver; mutable ComplexVector m_x_cache; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** sparse matrix object, whose type can be /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`. /// template <typename Derived> SparseGenComplexShiftSolve(const Eigen::SparseMatrixBase<Derived>& mat) : m_mat(mat), m_n(mat.rows()) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor), "SparseGenComplexShiftSolve: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (mat.rows() != mat.cols()) throw std::invalid_argument("SparseGenComplexShiftSolve: matrix must be square"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Set the complex shift \f$\sigma\f$. /// /// \param sigmar Real part of \f$\sigma\f$. /// \param sigmai Imaginary part of \f$\sigma\f$. 
/// void set_shift(const Scalar& sigmar, const Scalar& sigmai) { // Create a sparse idendity matrix (1 + 0i on diagonal) SparseComplexMatrix I(m_n, m_n); I.setIdentity(); // Sparse LU decomposition m_solver.compute(m_mat.template cast<Complex>() - Complex(sigmar, sigmai) * I); // Set cache to zero m_x_cache.resize(m_n); m_x_cache.setZero(); } /// /// Perform the complex shift-solve operation /// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = Re( inv(A - sigma * I) * x_in ) void perform_op(const Scalar* x_in, Scalar* y_out) const { m_x_cache.real() = MapConstVec(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_solver.solve(m_x_cache).real(); } }; } // namespace Spectra #endif // SPECTRA_SPARSE_GEN_COMPLEX_SHIFT_SOLVE_H
4,345
33.768
142
h
abess
abess-master/include/Spectra/MatOp/SparseGenMatProd.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SPARSE_GEN_MAT_PROD_H #define SPECTRA_SPARSE_GEN_MAT_PROD_H #include <Eigen/Core> #include <Eigen/SparseCore> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the matrix-vector multiplication operation on a /// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector /// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver /// eigen solvers. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// \tparam StorageIndex The type of the indices for the sparse matrix. /// template <typename Scalar_, int Flags = Eigen::ColMajor, typename StorageIndex = int> class SparseGenMatProd { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>; using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>; ConstGenericSparseMatrix m_mat; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** sparse matrix object, whose type can be /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`. 
/// template <typename Derived> SparseGenMatProd(const Eigen::SparseMatrixBase<Derived>& mat) : m_mat(mat) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor), "SparseGenMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_mat.rows(); } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_mat.cols(); } /// /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = A * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_mat.cols()); MapVec y(y_out, m_mat.rows()); y.noalias() = m_mat * x; } /// /// Perform the matrix-matrix multiplication operation \f$y=Ax\f$. /// Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const { return m_mat * mat_in; } /// /// Extract (i,j) element of the underlying matrix. /// Scalar operator()(Index i, Index j) const { return m_mat.coeff(i, j); } }; } // namespace Spectra #endif // SPECTRA_SPARSE_GEN_MAT_PROD_H
3,470
31.138889
132
h
abess
abess-master/include/Spectra/MatOp/SparseGenRealShiftSolve.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SPARSE_GEN_REAL_SHIFT_SOLVE_H #define SPECTRA_SPARSE_GEN_REAL_SHIFT_SOLVE_H #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/SparseLU> #include <stdexcept> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the shift-solve operation on a sparse real matrix \f$A\f$, /// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and /// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// \tparam StorageIndex The type of the indices for the sparse matrix. /// template <typename Scalar_, int Flags = Eigen::ColMajor, typename StorageIndex = int> class SparseGenRealShiftSolve { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>; using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>; ConstGenericSparseMatrix m_mat; const Index m_n; Eigen::SparseLU<SparseMatrix> m_solver; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** sparse matrix object, whose type can be /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`. 
/// template <typename Derived> SparseGenRealShiftSolve(const Eigen::SparseMatrixBase<Derived>& mat) : m_mat(mat), m_n(mat.rows()) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor), "SparseGenRealShiftSolve: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (mat.rows() != mat.cols()) throw std::invalid_argument("SparseGenRealShiftSolve: matrix must be square"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Set the real shift \f$\sigma\f$. /// void set_shift(const Scalar& sigma) { SparseMatrix I(m_n, m_n); I.setIdentity(); m_solver.compute(m_mat - sigma * I); if (m_solver.info() != Eigen::Success) throw std::invalid_argument("SparseGenRealShiftSolve: factorization failed with the given shift"); } /// /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(A - sigma * I) * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_solver.solve(x); } }; } // namespace Spectra #endif // SPECTRA_SPARSE_GEN_REAL_SHIFT_SOLVE_H
3,706
32.396396
139
h
abess
abess-master/include/Spectra/MatOp/SparseRegularInverse.h
// Copyright (C) 2017-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SPARSE_REGULAR_INVERSE_H #define SPECTRA_SPARSE_REGULAR_INVERSE_H #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/IterativeLinearSolvers> #include <stdexcept> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines matrix operations required by the generalized eigen solver /// in the regular inverse mode. For a sparse and positive definite matrix \f$B\f$, /// it implements the matrix-vector product \f$y=Bx\f$ and the linear equation /// solving operation \f$y=B^{-1}x\f$. /// /// This class is intended to be used with the SymGEigsSolver generalized eigen solver /// in the regular inverse mode. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which /// triangular part of the matrix is used. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// \tparam StorageIndex The type of the indices for the sparse matrix. /// template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor, typename StorageIndex = int> class SparseRegularInverse { public: /// /// Element type of the matrix. 
/// using Scalar = Scalar_; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>; using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>; ConstGenericSparseMatrix m_mat; const Index m_n; Eigen::ConjugateGradient<SparseMatrix> m_cg; mutable CompInfo m_info; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** sparse matrix object, whose type can be /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`. /// template <typename Derived> SparseRegularInverse(const Eigen::SparseMatrixBase<Derived>& mat) : m_mat(mat), m_n(mat.rows()) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor), "SparseRegularInverse: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (mat.rows() != mat.cols()) throw std::invalid_argument("SparseRegularInverse: matrix must be square"); m_cg.compute(mat); m_info = (m_cg.info() == Eigen::Success) ? CompInfo::Successful : CompInfo::NumericalIssue; } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Returns the status of the computation. /// The full list of enumeration values can be found in \ref Enumerations. /// CompInfo info() const { return m_info; } /// /// Perform the solving operation \f$y=B^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. 
/// // y_out = inv(B) * x_in void solve(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_cg.solve(x); m_info = (m_cg.info() == Eigen::Success) ? CompInfo::Successful : CompInfo::NotConverging; if (m_info != CompInfo::Successful) throw std::runtime_error("SparseRegularInverse: CG solver does not converge"); } /// /// Perform the matrix-vector multiplication operation \f$y=Bx\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = B * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_mat.template selfadjointView<Uplo>() * x; } }; } // namespace Spectra #endif // SPECTRA_SPARSE_REGULAR_INVERSE_H
4,681
33.426471
136
h
abess
abess-master/include/Spectra/MatOp/SparseSymMatProd.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SPARSE_SYM_MAT_PROD_H #define SPECTRA_SPARSE_SYM_MAT_PROD_H #include <Eigen/Core> #include <Eigen/SparseCore> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the matrix-vector multiplication operation on a /// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector /// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which /// triangular part of the matrix is used. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// \tparam StorageIndex The type of the indices for the sparse matrix. /// template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor, typename StorageIndex = int> class SparseSymMatProd { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>; using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>; ConstGenericSparseMatrix m_mat; public: /// /// Constructor to create the matrix operation object. /// /// \param mat An **Eigen** sparse matrix object, whose type can be /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`. 
/// template <typename Derived> SparseSymMatProd(const Eigen::SparseMatrixBase<Derived>& mat) : m_mat(mat) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor), "SparseSymMatProd: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_mat.rows(); } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_mat.cols(); } /// /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = A * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_mat.cols()); MapVec y(y_out, m_mat.rows()); y.noalias() = m_mat.template selfadjointView<Uplo>() * x; } /// /// Perform the matrix-matrix multiplication operation \f$y=Ax\f$. /// Matrix operator*(const Eigen::Ref<const Matrix>& mat_in) const { return m_mat.template selfadjointView<Uplo>() * mat_in; } /// /// Extract (i,j) element of the underlying matrix. /// Scalar operator()(Index i, Index j) const { return m_mat.coeff(i, j); } }; } // namespace Spectra #endif // SPECTRA_SPARSE_SYM_MAT_PROD_H
3,695
32.908257
132
h
abess
abess-master/include/Spectra/MatOp/SparseSymShiftSolve.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SPARSE_SYM_SHIFT_SOLVE_H #define SPECTRA_SPARSE_SYM_SHIFT_SOLVE_H #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/SparseLU> #include <stdexcept> namespace Spectra { /// /// \ingroup MatOp /// /// This class defines the shift-solve operation on a sparse real symmetric matrix \f$A\f$, /// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and /// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. /// /// \tparam Scalar_ The element type of the matrix, for example, /// `float`, `double`, and `long double`. /// \tparam Uplo Either `Eigen::Lower` or `Eigen::Upper`, indicating which /// triangular part of the matrix is used. /// \tparam Flags Either `Eigen::ColMajor` or `Eigen::RowMajor`, indicating /// the storage format of the input matrix. /// \tparam StorageIndex The type of the indices for the sparse matrix. /// template <typename Scalar_, int Uplo = Eigen::Lower, int Flags = Eigen::ColMajor, typename StorageIndex = int> class SparseSymShiftSolve { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using SparseMatrix = Eigen::SparseMatrix<Scalar, Flags, StorageIndex>; using ConstGenericSparseMatrix = const Eigen::Ref<const SparseMatrix>; ConstGenericSparseMatrix m_mat; const Index m_n; Eigen::SparseLU<SparseMatrix> m_solver; public: /// /// Constructor to create the matrix operation object. 
/// /// \param mat An **Eigen** sparse matrix object, whose type can be /// `Eigen::SparseMatrix<Scalar, ...>` or its mapped version /// `Eigen::Map<Eigen::SparseMatrix<Scalar, ...> >`. /// template <typename Derived> SparseSymShiftSolve(const Eigen::SparseMatrixBase<Derived>& mat) : m_mat(mat), m_n(mat.rows()) { static_assert( static_cast<int>(Derived::PlainObject::IsRowMajor) == static_cast<int>(SparseMatrix::IsRowMajor), "SparseSymShiftSolve: the \"Flags\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (mat.rows() != mat.cols()) throw std::invalid_argument("SparseSymShiftSolve: matrix must be square"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Set the real shift \f$\sigma\f$. /// void set_shift(const Scalar& sigma) { SparseMatrix mat = m_mat.template selfadjointView<Uplo>(); SparseMatrix identity(m_n, m_n); identity.setIdentity(); mat = mat - sigma * identity; m_solver.isSymmetric(true); m_solver.compute(mat); if (m_solver.info() != Eigen::Success) throw std::invalid_argument("SparseSymShiftSolve: factorization failed with the given shift"); } /// /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(A - sigma * I) * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_solver.solve(x); } }; } // namespace Spectra #endif // SPECTRA_SPARSE_SYM_SHIFT_SOLVE_H
3,989
33.695652
135
h
abess
abess-master/include/Spectra/MatOp/SymShiftInvert.h
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_SHIFT_INVERT_H #define SPECTRA_SYM_SHIFT_INVERT_H #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/SparseLU> #include <stdexcept> #include <type_traits> // std::conditional, std::is_same #include "../LinAlg/BKLDLT.h" #include "../Util/CompInfo.h" namespace Spectra { /// \cond // Compute and factorize A-sigma*B without unnecessary copying // Default case: A is sparse, B is sparse template <bool AIsSparse, bool BIsSparse, int UploA, int UploB> class SymShiftInvertHelper { public: template <typename Scalar, typename Fac, typename ArgA, typename ArgB> static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma) { using SpMat = typename ArgA::PlainObject; SpMat matA = A.template selfadjointView<UploA>(); SpMat matB = B.template selfadjointView<UploB>(); SpMat mat = matA - sigma * matB; // SparseLU solver fac.isSymmetric(true); fac.compute(mat); // Return true if successful return fac.info() == Eigen::Success; } }; // A is dense, B is dense or sparse template <bool BIsSparse, int UploA, int UploB> class SymShiftInvertHelper<false, BIsSparse, UploA, UploB> { public: template <typename Scalar, typename Fac, typename ArgA, typename ArgB> static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma) { using Matrix = typename ArgA::PlainObject; // Make a copy of the <UploA> triangular part of A Matrix mat(A.rows(), A.cols()); mat.template triangularView<UploA>() = A; // Update <UploA> triangular part of mat if (UploA == UploB) mat -= (B * sigma).template triangularView<UploA>(); else mat -= (B * sigma).template triangularView<UploB>().transpose(); // BKLDLT solver fac.compute(mat, UploA); // Return true if successful return fac.info() == 
CompInfo::Successful; } }; // A is sparse, B is dense template <int UploA, int UploB> class SymShiftInvertHelper<true, false, UploA, UploB> { public: template <typename Scalar, typename Fac, typename ArgA, typename ArgB> static bool factorize(Fac& fac, const ArgA& A, const ArgB& B, const Scalar& sigma) { using Matrix = typename ArgB::PlainObject; // Construct the <UploB> triangular part of -sigma*B Matrix mat(B.rows(), B.cols()); mat.template triangularView<UploB>() = -sigma * B; // Update <UploB> triangular part of mat if (UploA == UploB) mat += A.template triangularView<UploB>(); else mat += A.template triangularView<UploA>().transpose(); // BKLDLT solver fac.compute(mat, UploB); // Return true if successful return fac.info() == CompInfo::Successful; } }; /// \endcond /// /// \ingroup MatOp /// /// This class defines matrix operations required by the generalized eigen solver /// in the shift-and-invert mode. Given two symmetric matrices \f$A\f$ and \f$B\f$, /// it solves the linear equation \f$y=(A-\sigma B)^{-1}x\f$, where \f$\sigma\f$ is a real shift. /// Each of \f$A\f$ and \f$B\f$ can be a dense or sparse matrix. /// /// This class is intended to be used with the SymGEigsShiftSolver generalized eigen solver. /// /// \tparam Scalar_ The element type of the matrices. /// Currently supported types are `float`, `double`, and `long double`. /// \tparam TypeA The type of the \f$A\f$ matrix, indicating whether \f$A\f$ is /// dense or sparse. Possible values are `Eigen::Dense` and `Eigen::Sparse`. /// \tparam TypeB The type of the \f$B\f$ matrix, indicating whether \f$B\f$ is /// dense or sparse. Possible values are `Eigen::Dense` and `Eigen::Sparse`. /// \tparam UploA Whether the lower or upper triangular part of \f$A\f$ should be used. /// Possible values are `Eigen::Lower` and `Eigen::Upper`. /// \tparam UploB Whether the lower or upper triangular part of \f$B\f$ should be used. /// Possible values are `Eigen::Lower` and `Eigen::Upper`. 
/// \tparam FlagsA Additional flags for the matrix class of \f$A\f$. /// Possible values are `Eigen::ColMajor` and `Eigen::RowMajor`. /// \tparam FlagsB Additional flags for the matrix class of \f$B\f$. /// Possible values are `Eigen::ColMajor` and `Eigen::RowMajor`. /// \tparam StorageIndexA The storage index type of the \f$A\f$ matrix, only used when \f$A\f$ /// is a sparse matrix. /// \tparam StorageIndexB The storage index type of the \f$B\f$ matrix, only used when \f$B\f$ /// is a sparse matrix. /// template <typename Scalar_, typename TypeA = Eigen::Sparse, typename TypeB = Eigen::Sparse, int UploA = Eigen::Lower, int UploB = Eigen::Lower, int FlagsA = Eigen::ColMajor, int FlagsB = Eigen::ColMajor, typename StorageIndexA = int, typename StorageIndexB = int> class SymShiftInvert { public: /// /// Element type of the matrix. /// using Scalar = Scalar_; private: using Index = Eigen::Index; // Hypothetical type of the A matrix, either dense or sparse using DenseTypeA = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, FlagsA>; using SparseTypeA = Eigen::SparseMatrix<Scalar, FlagsA, StorageIndexA>; // Whether A is sparse using ASparse = std::is_same<TypeA, Eigen::Sparse>; // Actual type of the A matrix using MatrixA = typename std::conditional<ASparse::value, SparseTypeA, DenseTypeA>::type; // Hypothetical type of the B matrix, either dense or sparse using DenseTypeB = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, FlagsB>; using SparseTypeB = Eigen::SparseMatrix<Scalar, FlagsB, StorageIndexB>; // Whether B is sparse using BSparse = std::is_same<TypeB, Eigen::Sparse>; // Actual type of the B matrix using MatrixB = typename std::conditional<BSparse::value, SparseTypeB, DenseTypeB>::type; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; // The type of A-sigma*B if one of A and B is dense // DenseType = if (A is dense) MatrixA else MatrixB using DenseType = typename 
std::conditional<ASparse::value, MatrixB, MatrixA>::type; // The type of A-sigma*B // If both A and B are sparse, the result is MatrixA; otherwise the result is DenseType using ResType = typename std::conditional<ASparse::value && BSparse::value, MatrixA, DenseType>::type; // If both A and B are sparse, then the result A-sigma*B is sparse, so we use // sparseLU for factorization; otherwise A-sigma*B is dense, and we use BKLDLT using FacType = typename std::conditional< ASparse::value && BSparse::value, Eigen::SparseLU<ResType>, BKLDLT<Scalar>>::type; using ConstGenericMatrixA = const Eigen::Ref<const MatrixA>; using ConstGenericMatrixB = const Eigen::Ref<const MatrixB>; ConstGenericMatrixA m_matA; ConstGenericMatrixB m_matB; const Index m_n; FacType m_solver; public: /// /// Constructor to create the matrix operation object. /// /// \param A A dense or sparse matrix object, whose type can be `Eigen::Matrix<...>`, /// `Eigen::SparseMatrix<...>`, `Eigen::Map<Eigen::Matrix<...>>`, /// `Eigen::Map<Eigen::SparseMatrix<...>>`, `Eigen::Ref<Eigen::Matrix<...>>`, /// `Eigen::Ref<Eigen::SparseMatrix<...>>`, etc. /// \param B A dense or sparse matrix object. 
/// template <typename DerivedA, typename DerivedB> SymShiftInvert(const Eigen::EigenBase<DerivedA>& A, const Eigen::EigenBase<DerivedB>& B) : m_matA(A.derived()), m_matB(B.derived()), m_n(A.rows()) { static_assert( static_cast<int>(DerivedA::PlainObject::IsRowMajor) == static_cast<int>(MatrixA::IsRowMajor), "SymShiftInvert: the \"FlagsA\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); static_assert( static_cast<int>(DerivedB::PlainObject::IsRowMajor) == static_cast<int>(MatrixB::IsRowMajor), "SymShiftInvert: the \"FlagsB\" template parameter does not match the input matrix (Eigen::ColMajor/Eigen::RowMajor)"); if (m_n != A.cols() || m_n != B.rows() || m_n != B.cols()) throw std::invalid_argument("SymShiftInvert: A and B must be square matrices of the same size"); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_n; } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_n; } /// /// Set the real shift \f$\sigma\f$. /// void set_shift(const Scalar& sigma) { constexpr bool AIsSparse = ASparse::value; constexpr bool BIsSparse = BSparse::value; using Helper = SymShiftInvertHelper<AIsSparse, BIsSparse, UploA, UploB>; const bool success = Helper::factorize(m_solver, m_matA, m_matB, sigma); if (!success) throw std::invalid_argument("SymShiftInvert: factorization failed with the given shift"); } /// /// Perform the shift-invert operation \f$y=(A-\sigma B)^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(A - sigma * B) * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { MapConstVec x(x_in, m_n); MapVec y(y_out, m_n); y.noalias() = m_solver.solve(x); } }; } // namespace Spectra #endif // SPECTRA_SYM_SHIFT_INVERT_H
10,177
40.373984
131
h
abess
abess-master/include/Spectra/MatOp/internal/ArnoldiOp.h
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_ARNOLDI_OP_H #define SPECTRA_ARNOLDI_OP_H #include <Eigen/Core> #include <cmath> // std::sqrt namespace Spectra { /// /// \ingroup Internals /// @{ /// /// /// \defgroup Operators Operators /// /// Different types of operators. /// /// /// \ingroup Operators /// /// Operators used in the Arnoldi factorization. /// template <typename Scalar, typename OpType, typename BOpType> class ArnoldiOp { private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; const OpType& m_op; const BOpType& m_Bop; mutable Vector m_cache; public: ArnoldiOp(const OpType& op, const BOpType& Bop) : m_op(op), m_Bop(Bop), m_cache(op.rows()) {} // Move constructor ArnoldiOp(ArnoldiOp&& other) : m_op(other.m_op), m_Bop(other.m_Bop) { // We emulate the move constructor for Vector using Vector::swap() m_cache.swap(other.m_cache); } inline Index rows() const { return m_op.rows(); } // In generalized eigenvalue problem Ax=lambda*Bx, define the inner product to be <x, y> = x'By. 
// For regular eigenvalue problems, it is the usual inner product <x, y> = x'y // Compute <x, y> = x'By // x and y are two vectors template <typename Arg1, typename Arg2> Scalar inner_product(const Arg1& x, const Arg2& y) const { m_Bop.perform_op(y.data(), m_cache.data()); return x.dot(m_cache); } // Compute res = <X, y> = X'By // X is a matrix, y is a vector, res is a vector template <typename Arg1, typename Arg2> void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res) const { m_Bop.perform_op(y.data(), m_cache.data()); res.noalias() = x.transpose() * m_cache; } // B-norm of a vector, ||x||_B = sqrt(x'Bx) template <typename Arg> Scalar norm(const Arg& x) const { using std::sqrt; return sqrt(inner_product<Arg, Arg>(x, x)); } // The "A" operator to generate the Krylov subspace inline void perform_op(const Scalar* x_in, Scalar* y_out) const { m_op.perform_op(x_in, y_out); } }; /// /// \ingroup Operators /// /// Placeholder for the B-operator when \f$B = I\f$. /// class IdentityBOp {}; /// /// \ingroup Operators /// /// Partial specialization for the case \f$B = I\f$. /// template <typename Scalar, typename OpType> class ArnoldiOp<Scalar, OpType, IdentityBOp> { private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; const OpType& m_op; public: ArnoldiOp(const OpType& op, const IdentityBOp& /*Bop*/) : m_op(op) {} inline Index rows() const { return m_op.rows(); } // Compute <x, y> = x'y // x and y are two vectors template <typename Arg1, typename Arg2> Scalar inner_product(const Arg1& x, const Arg2& y) const { return x.dot(y); } // Compute res = <X, y> = X'y // X is a matrix, y is a vector, res is a vector template <typename Arg1, typename Arg2> void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref<Vector> res) const { res.noalias() = x.transpose() * y; } // B-norm of a vector. 
For regular eigenvalue problems it is simply the L2 norm template <typename Arg> Scalar norm(const Arg& x) const { return x.norm(); } // The "A" operator to generate the Krylov subspace inline void perform_op(const Scalar* x_in, Scalar* y_out) const { m_op.perform_op(x_in, y_out); } }; /// /// @} /// } // namespace Spectra #endif // SPECTRA_ARNOLDI_OP_H
3,901
23.540881
100
h
abess
abess-master/include/Spectra/MatOp/internal/SymGEigsBucklingOp.h
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_GEIGS_BUCKLING_OP_H #define SPECTRA_SYM_GEIGS_BUCKLING_OP_H #include <Eigen/Core> #include "../SymShiftInvert.h" #include "../SparseSymMatProd.h" namespace Spectra { /// /// \ingroup Operators /// /// This class defines the matrix operation for generalized eigen solver in the /// buckling mode. It computes \f$y=(K-\sigma K_G)^{-1}Kx\f$ for any /// vector \f$x\f$, where \f$K\f$ is positive definite, \f$K_G\f$ is symmetric, /// and \f$\sigma\f$ is a real shift. /// This class is intended for internal use. /// template <typename OpType = SymShiftInvert<double>, typename BOpType = SparseSymMatProd<double>> class SymGEigsBucklingOp { public: using Scalar = typename OpType::Scalar; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; OpType& m_op; const BOpType& m_Bop; mutable Vector m_cache; // temporary working space public: /// /// Constructor to create the matrix operation object. /// /// \param op The \f$(K-\sigma K_G)^{-1}\f$ matrix operation object. /// \param Bop The \f$K\f$ matrix operation object. /// SymGEigsBucklingOp(OpType& op, const BOpType& Bop) : m_op(op), m_Bop(Bop), m_cache(op.rows()) {} /// /// Move constructor. /// SymGEigsBucklingOp(SymGEigsBucklingOp&& other) : m_op(other.m_op), m_Bop(other.m_Bop) { // We emulate the move constructor for Vector using Vector::swap() m_cache.swap(other.m_cache); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_op.rows(); } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_op.rows(); } /// /// Set the real shift \f$\sigma\f$. 
/// void set_shift(const Scalar& sigma) { m_op.set_shift(sigma); } /// /// Perform the matrix operation \f$y=(K-\sigma K_G)^{-1}Kx\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(K - sigma * K_G) * K * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { m_Bop.perform_op(x_in, m_cache.data()); m_op.perform_op(m_cache.data(), y_out); } }; } // namespace Spectra #endif // SPECTRA_SYM_GEIGS_BUCKLING_OP_H
2,671
26.833333
79
h
abess
abess-master/include/Spectra/MatOp/internal/SymGEigsCayleyOp.h
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_GEIGS_CAYLEY_OP_H #define SPECTRA_SYM_GEIGS_CAYLEY_OP_H #include <Eigen/Core> #include "../SymShiftInvert.h" #include "../SparseSymMatProd.h" namespace Spectra { /// /// \ingroup Operators /// /// This class defines the matrix operation for generalized eigen solver in the /// Cayley mode. It computes \f$y=(A-\sigma B)^{-1}(A+\sigma B)x\f$ for any /// vector \f$x\f$, where \f$A\f$ is a symmetric matrix, \f$B\f$ is positive definite, /// and \f$\sigma\f$ is a real shift. /// This class is intended for internal use. /// template <typename OpType = SymShiftInvert<double>, typename BOpType = SparseSymMatProd<double>> class SymGEigsCayleyOp { public: using Scalar = typename OpType::Scalar; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; OpType& m_op; const BOpType& m_Bop; mutable Vector m_cache; // temporary working space Scalar m_sigma; public: /// /// Constructor to create the matrix operation object. /// /// \param op The \f$(A-\sigma B)^{-1}\f$ matrix operation object. /// \param Bop The \f$B\f$ matrix operation object. /// SymGEigsCayleyOp(OpType& op, const BOpType& Bop) : m_op(op), m_Bop(Bop), m_cache(op.rows()) {} /// /// Move constructor. /// SymGEigsCayleyOp(SymGEigsCayleyOp&& other) : m_op(other.m_op), m_Bop(other.m_Bop), m_sigma(other.m_sigma) { // We emulate the move constructor for Vector using Vector::swap() m_cache.swap(other.m_cache); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_op.rows(); } /// /// Return the number of columns of the underlying matrix. 
/// Index cols() const { return m_op.rows(); } /// /// Set the real shift \f$\sigma\f$. /// void set_shift(const Scalar& sigma) { m_op.set_shift(sigma); m_sigma = sigma; } /// /// Perform the matrix operation \f$y=(A-\sigma B)^{-1}(A+\sigma B)x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(A - sigma * B) * (A + sigma * B) * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { // inv(A - sigma * B) * (A + sigma * B) * x // = inv(A - sigma * B) * (A - sigma * B + 2 * sigma * B) * x // = x + 2 * sigma * inv(A - sigma * B) * B * x m_Bop.perform_op(x_in, m_cache.data()); m_op.perform_op(m_cache.data(), y_out); MapConstVec x(x_in, this->rows()); MapVec y(y_out, this->rows()); y.noalias() = x + (Scalar(2) * m_sigma) * y; } }; } // namespace Spectra #endif // SPECTRA_SYM_GEIGS_CAYLEY_OP_H
3,163
28.849057
86
h
abess
abess-master/include/Spectra/MatOp/internal/SymGEigsCholeskyOp.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_GEIGS_CHOLESKY_OP_H #define SPECTRA_SYM_GEIGS_CHOLESKY_OP_H #include <Eigen/Core> #include "../DenseSymMatProd.h" #include "../DenseCholesky.h" namespace Spectra { /// /// \ingroup Operators /// /// This class defines the matrix operation for generalized eigen solver in the /// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any /// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$. /// This class is intended for internal use. /// template <typename OpType = DenseSymMatProd<double>, typename BOpType = DenseCholesky<double>> class SymGEigsCholeskyOp { public: using Scalar = typename OpType::Scalar; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; const OpType& m_op; const BOpType& m_Bop; mutable Vector m_cache; // temporary working space public: /// /// Constructor to create the matrix operation object. /// /// \param op The \f$A\f$ matrix operation object. /// \param Bop The \f$B\f$ matrix operation object. /// SymGEigsCholeskyOp(const OpType& op, const BOpType& Bop) : m_op(op), m_Bop(Bop), m_cache(op.rows()) {} /// /// Move constructor. /// SymGEigsCholeskyOp(SymGEigsCholeskyOp&& other) : m_op(other.m_op), m_Bop(other.m_Bop) { // We emulate the move constructor for Vector using Vector::swap() m_cache.swap(other.m_cache); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_Bop.rows(); } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_Bop.rows(); } /// /// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. 
/// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(L) * A * inv(L') * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { m_Bop.upper_triangular_solve(x_in, y_out); m_op.perform_op(y_out, m_cache.data()); m_Bop.lower_triangular_solve(m_cache.data(), y_out); } }; } // namespace Spectra #endif // SPECTRA_SYM_GEIGS_CHOLESKY_OP_H
2,548
27.965909
80
h
abess
abess-master/include/Spectra/MatOp/internal/SymGEigsRegInvOp.h
// Copyright (C) 2017-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_GEIGS_REG_INV_OP_H #define SPECTRA_SYM_GEIGS_REG_INV_OP_H #include <Eigen/Core> #include "../SparseSymMatProd.h" #include "../SparseRegularInverse.h" namespace Spectra { /// /// \ingroup Operators /// /// This class defines the matrix operation for generalized eigen solver in the /// regular inverse mode. This class is intended for internal use. /// template <typename OpType = SparseSymMatProd<double>, typename BOpType = SparseRegularInverse<double>> class SymGEigsRegInvOp { public: using Scalar = typename OpType::Scalar; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; const OpType& m_op; const BOpType& m_Bop; mutable Vector m_cache; // temporary working space public: /// /// Constructor to create the matrix operation object. /// /// \param op The \f$A\f$ matrix operation object. /// \param Bop The \f$B\f$ matrix operation object. /// SymGEigsRegInvOp(const OpType& op, const BOpType& Bop) : m_op(op), m_Bop(Bop), m_cache(op.rows()) {} /// /// Move constructor. /// SymGEigsRegInvOp(SymGEigsRegInvOp&& other) : m_op(other.m_op), m_Bop(other.m_Bop) { // We emulate the move constructor for Vector using Vector::swap() m_cache.swap(other.m_cache); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_Bop.rows(); } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_Bop.rows(); } /// /// Perform the matrix operation \f$y=B^{-1}Ax\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. 
/// // y_out = inv(B) * A * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { m_op.perform_op(x_in, m_cache.data()); m_Bop.solve(m_cache.data(), y_out); } }; } // namespace Spectra #endif // SPECTRA_SYM_GEIGS_REG_INV_OP_H
2,330
26.423529
79
h
abess
abess-master/include/Spectra/MatOp/internal/SymGEigsShiftInvertOp.h
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H #define SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H #include <Eigen/Core> #include "../SymShiftInvert.h" #include "../SparseSymMatProd.h" namespace Spectra { /// /// \ingroup Operators /// /// This class defines the matrix operation for generalized eigen solver in the /// shift-and-invert mode. It computes \f$y=(A-\sigma B)^{-1}Bx\f$ for any /// vector \f$x\f$, where \f$A\f$ is a symmetric matrix, \f$B\f$ is positive definite, /// and \f$\sigma\f$ is a real shift. /// This class is intended for internal use. /// template <typename OpType = SymShiftInvert<double>, typename BOpType = SparseSymMatProd<double>> class SymGEigsShiftInvertOp { public: using Scalar = typename OpType::Scalar; private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; OpType& m_op; const BOpType& m_Bop; mutable Vector m_cache; // temporary working space public: /// /// Constructor to create the matrix operation object. /// /// \param op The \f$(A-\sigma B)^{-1}\f$ matrix operation object. /// \param Bop The \f$B\f$ matrix operation object. /// SymGEigsShiftInvertOp(OpType& op, const BOpType& Bop) : m_op(op), m_Bop(Bop), m_cache(op.rows()) {} /// /// Move constructor. /// SymGEigsShiftInvertOp(SymGEigsShiftInvertOp&& other) : m_op(other.m_op), m_Bop(other.m_Bop) { // We emulate the move constructor for Vector using Vector::swap() m_cache.swap(other.m_cache); } /// /// Return the number of rows of the underlying matrix. /// Index rows() const { return m_op.rows(); } /// /// Return the number of columns of the underlying matrix. /// Index cols() const { return m_op.rows(); } /// /// Set the real shift \f$\sigma\f$. 
/// void set_shift(const Scalar& sigma) { m_op.set_shift(sigma); } /// /// Perform the matrix operation \f$y=(A-\sigma B)^{-1}Bx\f$. /// /// \param x_in Pointer to the \f$x\f$ vector. /// \param y_out Pointer to the \f$y\f$ vector. /// // y_out = inv(A - sigma * B) * B * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const { m_Bop.perform_op(x_in, m_cache.data()); m_op.perform_op(m_cache.data(), y_out); } }; } // namespace Spectra #endif // SPECTRA_SYM_GEIGS_SHIFT_INVERT_OP_H
2,702
27.15625
86
h
abess
abess-master/include/Spectra/Util/CompInfo.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_COMP_INFO_H #define SPECTRA_COMP_INFO_H namespace Spectra { /// /// \ingroup Enumerations /// /// The enumeration to report the status of computation. /// enum class CompInfo { Successful, ///< Computation was successful. NotComputed, ///< Used in eigen solvers, indicating that computation ///< has not been conducted. Users should call ///< the `compute()` member function of solvers. NotConverging, ///< Used in eigen solvers, indicating that some eigenvalues ///< did not converge. The `compute()` ///< function returns the number of converged eigenvalues. NumericalIssue ///< Used in various matrix factorization classes, for example in ///< Cholesky decomposition it indicates that the ///< matrix is not positive definite. }; } // namespace Spectra #endif // SPECTRA_COMP_INFO_H
1,213
31.810811
85
h
abess
abess-master/include/Spectra/Util/GEigsMode.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_GEIGS_MODE_H #define SPECTRA_GEIGS_MODE_H namespace Spectra { /// /// \ingroup Enumerations /// /// The enumeration to specify the mode of generalized eigenvalue solver. /// enum class GEigsMode { Cholesky, ///< Using Cholesky decomposition to solve generalized eigenvalues. RegularInverse, ///< Regular inverse mode for generalized eigenvalue solver. ShiftInvert, ///< Shift-and-invert mode for generalized eigenvalue solver. Buckling, ///< Buckling mode for generalized eigenvalue solver. Cayley ///< Cayley transformation mode for generalized eigenvalue solver. }; } // namespace Spectra #endif // SPECTRA_GEIGS_MODE_H
959
32.103448
88
h
abess
abess-master/include/Spectra/Util/SelectionRule.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SELECTION_RULE_H #define SPECTRA_SELECTION_RULE_H #include <vector> // std::vector #include <cmath> // std::abs #include <algorithm> // std::sort #include <complex> // std::complex #include <utility> // std::pair #include <stdexcept> // std::invalid_argument #include <Eigen/Core> #include "TypeTraits.h" namespace Spectra { /// /// \defgroup Enumerations Enumerations /// /// Enumeration types for the selection rule of eigenvalues. /// /// /// \ingroup Enumerations /// /// The enumeration of selection rules of desired eigenvalues. /// enum class SortRule { LargestMagn, ///< Select eigenvalues with largest magnitude. Magnitude ///< means the absolute value for real numbers and norm for ///< complex numbers. Applies to both symmetric and general ///< eigen solvers. LargestReal, ///< Select eigenvalues with largest real part. Only for general eigen solvers. LargestImag, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers. LargestAlge, ///< Select eigenvalues with largest algebraic value, considering ///< any negative sign. Only for symmetric eigen solvers. SmallestMagn, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general ///< eigen solvers. SmallestReal, ///< Select eigenvalues with smallest real part. Only for general eigen solvers. SmallestImag, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers. SmallestAlge, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers. BothEnds ///< Select eigenvalues half from each end of the spectrum. When ///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers. 
}; /// \cond // When comparing eigenvalues, we first calculate the "target" to sort. // For example, if we want to choose the eigenvalues with // largest magnitude, the target will be -abs(x). // The minus sign is due to the fact that std::sort() sorts in ascending order. // Default target: throw an exception template <typename Scalar, SortRule Rule> class SortingTarget { public: static ElemType<Scalar> get(const Scalar& val) { using std::abs; throw std::invalid_argument("incompatible selection rule"); return -abs(val); } }; // Specialization for SortRule::LargestMagn // This covers [float, double, complex] x [SortRule::LargestMagn] template <typename Scalar> class SortingTarget<Scalar, SortRule::LargestMagn> { public: static ElemType<Scalar> get(const Scalar& val) { using std::abs; return -abs(val); } }; // Specialization for SortRule::LargestReal // This covers [complex] x [SortRule::LargestReal] template <typename RealType> class SortingTarget<std::complex<RealType>, SortRule::LargestReal> { public: static RealType get(const std::complex<RealType>& val) { return -val.real(); } }; // Specialization for SortRule::LargestImag // This covers [complex] x [SortRule::LargestImag] template <typename RealType> class SortingTarget<std::complex<RealType>, SortRule::LargestImag> { public: static RealType get(const std::complex<RealType>& val) { using std::abs; return -abs(val.imag()); } }; // Specialization for SortRule::LargestAlge // This covers [float, double] x [SortRule::LargestAlge] template <typename Scalar> class SortingTarget<Scalar, SortRule::LargestAlge> { public: static Scalar get(const Scalar& val) { return -val; } }; // Here SortRule::BothEnds is the same as SortRule::LargestAlge, but // we need some additional steps, which are done in // SymEigsSolver.h => retrieve_ritzpair(). // There we move the smallest values to the proper locations. 
template <typename Scalar> class SortingTarget<Scalar, SortRule::BothEnds> { public: static Scalar get(const Scalar& val) { return -val; } }; // Specialization for SortRule::SmallestMagn // This covers [float, double, complex] x [SortRule::SmallestMagn] template <typename Scalar> class SortingTarget<Scalar, SortRule::SmallestMagn> { public: static ElemType<Scalar> get(const Scalar& val) { using std::abs; return abs(val); } }; // Specialization for SortRule::SmallestReal // This covers [complex] x [SortRule::SmallestReal] template <typename RealType> class SortingTarget<std::complex<RealType>, SortRule::SmallestReal> { public: static RealType get(const std::complex<RealType>& val) { return val.real(); } }; // Specialization for SortRule::SmallestImag // This covers [complex] x [SortRule::SmallestImag] template <typename RealType> class SortingTarget<std::complex<RealType>, SortRule::SmallestImag> { public: static RealType get(const std::complex<RealType>& val) { using std::abs; return abs(val.imag()); } }; // Specialization for SortRule::SmallestAlge // This covers [float, double] x [SortRule::SmallestAlge] template <typename Scalar> class SortingTarget<Scalar, SortRule::SmallestAlge> { public: static Scalar get(const Scalar& val) { return val; } }; // Sort eigenvalues template <typename T, SortRule Rule> class SortEigenvalue { private: using Index = Eigen::Index; using IndexArray = std::vector<Index>; const T* m_evals; IndexArray m_index; public: // Sort indices according to the eigenvalues they point to inline bool operator()(Index i, Index j) { return SortingTarget<T, Rule>::get(m_evals[i]) < SortingTarget<T, Rule>::get(m_evals[j]); } SortEigenvalue(const T* start, Index size) : m_evals(start), m_index(size) { for (Index i = 0; i < size; i++) { m_index[i] = i; } std::sort(m_index.begin(), m_index.end(), *this); } inline IndexArray index() const { return m_index; } inline void swap(IndexArray& other) { m_index.swap(other); } }; // Sort values[:len] according to 
the selection rule, and return the indices template <typename Scalar> std::vector<Eigen::Index> argsort(SortRule selection, const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& values, Eigen::Index len) { using Index = Eigen::Index; // Sort Ritz values and put the wanted ones at the beginning std::vector<Index> ind; switch (selection) { case SortRule::LargestMagn: { SortEigenvalue<Scalar, SortRule::LargestMagn> sorting(values.data(), len); sorting.swap(ind); break; } case SortRule::BothEnds: case SortRule::LargestAlge: { SortEigenvalue<Scalar, SortRule::LargestAlge> sorting(values.data(), len); sorting.swap(ind); break; } case SortRule::SmallestMagn: { SortEigenvalue<Scalar, SortRule::SmallestMagn> sorting(values.data(), len); sorting.swap(ind); break; } case SortRule::SmallestAlge: { SortEigenvalue<Scalar, SortRule::SmallestAlge> sorting(values.data(), len); sorting.swap(ind); break; } default: throw std::invalid_argument("unsupported selection rule"); } // For SortRule::BothEnds, the eigenvalues are sorted according to the // SortRule::LargestAlge rule, so we need to move those smallest values to the left // The order would be // Largest => Smallest => 2nd largest => 2nd smallest => ... 
// We keep this order since the first k values will always be // the wanted collection, no matter k is nev_updated (used in SymEigsBase::restart()) // or is nev (used in SymEigsBase::sort_ritzpair()) if (selection == SortRule::BothEnds) { std::vector<Index> ind_copy(ind); for (Index i = 0; i < len; i++) { // If i is even, pick values from the left (large values) // If i is odd, pick values from the right (small values) if (i % 2 == 0) ind[i] = ind_copy[i / 2]; else ind[i] = ind_copy[len - 1 - i / 2]; } } return ind; } // Default vector length template <typename Scalar> std::vector<Eigen::Index> argsort(SortRule selection, const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& values) { return argsort<Scalar>(selection, values, values.size()); } /// \endcond } // namespace Spectra #endif // SPECTRA_SELECTION_RULE_H
8,908
28.598007
127
h
abess
abess-master/include/Spectra/Util/SimpleRandom.h
// Copyright (C) 2016-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_SIMPLE_RANDOM_H #define SPECTRA_SIMPLE_RANDOM_H #include <Eigen/Core> /// \cond namespace Spectra { // We need a simple pseudo random number generator here: // 1. It is used to generate initial and restarted residual vector. // 2. It is not necessary to be so "random" and advanced. All we hope // is that the residual vector is not in the space spanned by the // current Krylov space. This should be met almost surely. // 3. We don't want to call RNG in C++, since we actually want the // algorithm to be deterministic. Also, calling RNG in C/C++ is not // allowed in R packages submitted to CRAN. // 4. The method should be as simple as possible, so an LCG is enough. // 5. Based on public domain code by Ray Gardner // http://stjarnhimlen.se/snippets/rg_rand.c template <typename Scalar = double> class SimpleRandom { private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; static constexpr unsigned int m_a = 16807; // multiplier static constexpr unsigned long m_max = 2147483647L; // 2^31 - 1 long m_rand; // RNG state inline long next_long_rand(long seed) const { unsigned long lo, hi; lo = m_a * (long) (seed & 0xFFFF); hi = m_a * (long) ((unsigned long) seed >> 16); lo += (hi & 0x7FFF) << 16; if (lo > m_max) { lo &= m_max; ++lo; } lo += hi >> 15; if (lo > m_max) { lo &= m_max; ++lo; } return (long) lo; } public: SimpleRandom(unsigned long init_seed) : m_rand(init_seed ? 
(init_seed & m_max) : 1) {} // Return a single random number, ranging from -0.5 to 0.5 Scalar random() { m_rand = next_long_rand(m_rand); return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); } // Fill the given vector with random numbers // Ranging from -0.5 to 0.5 void random_vec(Vector& vec) { const Index len = vec.size(); for (Index i = 0; i < len; i++) { m_rand = next_long_rand(m_rand); vec[i] = Scalar(m_rand); } vec.array() = vec.array() / Scalar(m_max) - Scalar(0.5); } // Return a vector of random numbers // Ranging from -0.5 to 0.5 Vector random_vec(const Index len) { Vector res(len); random_vec(res); return res; } }; } // namespace Spectra /// \endcond #endif // SPECTRA_SIMPLE_RANDOM_H
2,841
27.42
70
h
abess
abess-master/include/Spectra/Util/TypeTraits.h
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_TYPE_TRAITS_H #define SPECTRA_TYPE_TRAITS_H #include <Eigen/Core> #include <limits> /// \cond // Clang-Format will have unintended effects: // static constexpr Scalar(min)() // So we turn it off here // // clang-format off namespace Spectra { // For a real value type "Scalar", we want to know its smallest // positive value, i.e., std::numeric_limits<Scalar>::min(). // However, we must take non-standard value types into account, // so we rely on Eigen::NumTraits. // // Eigen::NumTraits has defined epsilon() and lowest(), but // lowest() means negative highest(), which is a very small // negative value. // // Therefore, we manually define this limit, and use eplison()^3 // to mimic it for non-standard types. // Generic definition template <typename Scalar> struct TypeTraits { static constexpr Scalar epsilon() { return Eigen::numext::numeric_limits<Scalar>::epsilon(); } static constexpr Scalar (min)() { return epsilon() * epsilon() * epsilon(); } }; // Full specialization template <> struct TypeTraits<float> { static constexpr float epsilon() { return std::numeric_limits<float>::epsilon(); } static constexpr float (min)() { return (std::numeric_limits<float>::min)(); } }; template <> struct TypeTraits<double> { static constexpr double epsilon() { return std::numeric_limits<double>::epsilon(); } static constexpr double (min)() { return (std::numeric_limits<double>::min)(); } }; template <> struct TypeTraits<long double> { static constexpr long double epsilon() { return std::numeric_limits<long double>::epsilon(); } static constexpr long double (min)() { return (std::numeric_limits<long double>::min)(); } }; // Get the element type of a "scalar" // ElemType<double> => double // 
ElemType<std::complex<double>> => double template <typename T> using ElemType = typename Eigen::NumTraits<T>::Real; } // namespace Spectra /// \endcond #endif // SPECTRA_TYPE_TRAITS_H
2,365
22.66
70
h
abess
abess-master/include/Spectra/Util/Version.h
// Copyright (C) 2020-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_VERSION_H #define SPECTRA_VERSION_H #define SPECTRA_MAJOR_VERSION 1 #define SPECTRA_MINOR_VERSION 0 #define SPECTRA_PATCH_VERSION 0 #define SPECTRA_VERSION (SPECTRA_MAJOR_VERSION * 10000 + SPECTRA_MINOR_VERSION * 100 + SPECTRA_PATCH_VERSION) #endif // SPECTRA_VERSION_H
556
31.764706
109
h
abess
abess-master/include/Spectra/contrib/LOBPCGSolver.h
// Written by Anna Araslanova // Modified by Yixuan Qiu // License: MIT #ifndef SPECTRA_LOBPCG_SOLVER_H #define SPECTRA_LOBPCG_SOLVER_H #include <functional> #include <map> #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/Eigenvalues> #include <Eigen/SVD> #include <Eigen/SparseCholesky> #include "../SymGEigsSolver.h" namespace Spectra { /// /// \ingroup EigenSolver /// /// *** METHOD /// The class represent the LOBPCG algorithm, which was invented by Andrew Knyazev /// Theoretical background of the procedure can be found in the articles below /// - Knyazev, A.V., 2001. Toward the optimal preconditioned eigensolver : Locally optimal block preconditioned conjugate gradient method.SIAM journal on scientific computing, 23(2), pp.517 - 541. /// - Knyazev, A.V., Argentati, M.E., Lashuk, I. and Ovtchinnikov, E.E., 2007. Block locally optimal preconditioned eigenvalue xolvers(BLOPEX) in HYPRE and PETSc.SIAM Journal on Scientific Computing, 29(5), pp.2224 - 2239. /// /// *** CONDITIONS OF USE /// Locally Optimal Block Preconditioned Conjugate Gradient(LOBPCG) is a method for finding the M smallest eigenvalues /// and eigenvectors of a large symmetric positive definite generalized eigenvalue problem /// \f$Ax=\lambda Bx,\f$ /// where \f$A_{NxN}\f$ is a symmetric matrix, \f$B\f$ is symmetric and positive - definite. 
\f$A and B\f$ are also assumed large and sparse /// \f$\textit{M}\f$ should be \f$\<< textit{N}\f$ (at least \f$\textit{5M} < \textit{N} \f$) /// /// *** ARGUMENTS /// Eigen::SparseMatrix<long double> A; // N*N - Ax = lambda*Bx, lrage and sparse /// Eigen::SparseMatrix<long double> X; // N*M - initial approximations to eigenvectors (random in general case) /// Spectra::LOBPCGSolver<long double> solver(A, X); /// *Eigen::SparseMatrix<long double> B; // N*N - Ax = lambda*Bx, sparse, positive definite /// solver.setConstraints(B); /// *Eigen::SparseMatrix<long double> Y; // N*K - constraints, already found eigenvectors /// solver.setB(B); /// *Eigen::SparseMatrix<long double> T; // N*N - preconditioner ~ A^-1 /// solver.setPreconditioner(T); /// /// *** OUTCOMES /// solver.solve(); // compute eigenpairs // void /// solver.info(); // state of converjance // int /// solver.residuals(); // get residuals to evaluate biases // Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> /// solver.eigenvalues(); // get eigenvalues // Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> /// solver.eigenvectors(); // get eigenvectors // Eigen::Matrix<Scalar, Eigen::Dynamic, 1> /// /// *** EXAMPLE /// \code{.cpp} /// #include <Spectra/contrib/SymSparseEigsSolverLOBPCG.h> /// /// // random A /// Matrix a; /// a = (Matrix::Random(10, 10).array() > 0.6).cast<long double>() * Matrix::Random(10, 10).array() * 5; /// a = Matrix((a).triangularView<Eigen::Lower>()) + Matrix((a).triangularView<Eigen::Lower>()).transpose(); /// for (int i = 0; i < 10; i++) /// a(i, i) = i + 0.5; /// std::cout << a << "\n"; /// Eigen::SparseMatrix<long double> A(a.sparseView()); /// // random X /// Eigen::Matrix<long double, 10, 2> x; /// x = Matrix::Random(10, 2).array(); /// Eigen::SparseMatrix<long double> X(x.sparseView()); /// // solve Ax = lambda*x /// Spectra::LOBPCGSolver<long double> solver(A, X); /// solver.compute(10, 1e-4); // 10 iterations, L2_tolerance = 1e-4*N /// std::cout << "info\n" << 
solver.info() << std::endl; /// std::cout << "eigenvalues\n" << solver.eigenvalues() << std::endl; /// std::cout << "eigenvectors\n" << solver.eigenvectors() << std::endl; /// std::cout << "residuals\n" << solver.residuals() << std::endl; /// \endcode /// template <typename Scalar = long double> class LOBPCGSolver { private: typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector; typedef std::complex<Scalar> Complex; typedef Eigen::Matrix<Complex, Eigen::Dynamic, Eigen::Dynamic> ComplexMatrix; typedef Eigen::Matrix<Complex, Eigen::Dynamic, 1> ComplexVector; typedef Eigen::SparseMatrix<Scalar> SparseMatrix; typedef Eigen::SparseMatrix<Complex> SparseComplexMatrix; const int m_n; // dimension of matrix A const int m_nev; // number of eigenvalues requested SparseMatrix A, X; SparseMatrix m_Y, m_B, m_preconditioner; bool flag_with_constraints, flag_with_B, flag_with_preconditioner; public: SparseMatrix m_residuals; Matrix m_evectors; Vector m_evalues; int m_info; private: // B-orthonormalize matrix M int orthogonalizeInPlace(SparseMatrix& M, SparseMatrix& B, SparseMatrix& true_BM, bool has_true_BM = false) { SparseMatrix BM; if (has_true_BM == false) { if (flag_with_B) { BM = B * M; } else { BM = M; } } else { BM = true_BM; } Eigen::SimplicialLDLT<SparseMatrix> chol_MBM(M.transpose() * BM); if (chol_MBM.info() != Eigen::Success) { // LDLT decomposition fail m_info = chol_MBM.info(); return chol_MBM.info(); } SparseComplexMatrix Upper_MBM = chol_MBM.matrixU().template cast<Complex>(); ComplexVector D_MBM_vec = chol_MBM.vectorD().template cast<Complex>(); D_MBM_vec = D_MBM_vec.cwiseSqrt(); for (int i = 0; i < D_MBM_vec.rows(); i++) { D_MBM_vec(i) = Complex(1.0, 0.0) / D_MBM_vec(i); } SparseComplexMatrix D_MBM_mat(D_MBM_vec.asDiagonal()); SparseComplexMatrix U_inv(Upper_MBM.rows(), Upper_MBM.cols()); U_inv.setIdentity(); Upper_MBM.template triangularView<Eigen::Upper>().solveInPlace(U_inv); 
SparseComplexMatrix right_product = U_inv * D_MBM_mat; M = M * right_product.real(); if (flag_with_B) { true_BM = B * M; } else { true_BM = M; } return Eigen::Success; } void applyConstraintsInPlace(SparseMatrix& X, SparseMatrix& Y, SparseMatrix& B) { SparseMatrix BY; if (flag_with_B) { BY = B * Y; } else { BY = Y; } SparseMatrix YBY = Y.transpose() * BY; SparseMatrix BYX = BY.transpose() * X; SparseMatrix YBY_XYX = (Matrix(YBY).bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Matrix(BYX))).sparseView(); X = X - Y * YBY_XYX; } /* return 'AB CD' */ Matrix stack_4_matricies(Matrix A, Matrix B, Matrix C, Matrix D) { Matrix result(A.rows() + C.rows(), A.cols() + B.cols()); result.topLeftCorner(A.rows(), A.cols()) = A; result.topRightCorner(B.rows(), B.cols()) = B; result.bottomLeftCorner(C.rows(), C.cols()) = C; result.bottomRightCorner(D.rows(), D.cols()) = D; return result; } Matrix stack_9_matricies(Matrix A, Matrix B, Matrix C, Matrix D, Matrix E, Matrix F, Matrix G, Matrix H, Matrix I) { Matrix result(A.rows() + D.rows() + G.rows(), A.cols() + B.cols() + C.cols()); result.block(0, 0, A.rows(), A.cols()) = A; result.block(0, A.cols(), B.rows(), B.cols()) = B; result.block(0, A.cols() + B.cols(), C.rows(), C.cols()) = C; result.block(A.rows(), 0, D.rows(), D.cols()) = D; result.block(A.rows(), A.cols(), E.rows(), E.cols()) = E; result.block(A.rows(), A.cols() + B.cols(), F.rows(), F.cols()) = F; result.block(A.rows() + D.rows(), 0, G.rows(), G.cols()) = G; result.block(A.rows() + D.rows(), A.cols(), H.rows(), H.cols()) = H; result.block(A.rows() + D.rows(), A.cols() + B.cols(), I.rows(), I.cols()) = I; return result; } void sort_epairs(Vector& evalues, Matrix& evectors, SortRule SelectionRule) { std::function<bool(Scalar, Scalar)> cmp; if (SelectionRule == SortRule::SmallestAlge) cmp = std::less<Scalar>{}; else cmp = std::greater<Scalar>{}; std::map<Scalar, Vector, decltype(cmp)> epairs(cmp); for (int i = 0; i < m_evectors.cols(); ++i) 
epairs.insert(std::make_pair(evalues(i), evectors.col(i))); int i = 0; for (auto& epair : epairs) { evectors.col(i) = epair.second; evalues(i) = epair.first; i++; } } void removeColumns(SparseMatrix& matrix, std::vector<int>& colToRemove) { // remove columns through matrix multiplication SparseMatrix new_matrix(matrix.cols(), matrix.cols() - int(colToRemove.size())); int iCol = 0; std::vector<Eigen::Triplet<Scalar>> tripletList; tripletList.reserve(matrix.cols() - int(colToRemove.size())); for (int iRow = 0; iRow < matrix.cols(); iRow++) { if (std::find(colToRemove.begin(), colToRemove.end(), iRow) == colToRemove.end()) { tripletList.push_back(Eigen::Triplet<Scalar>(iRow, iCol, 1)); iCol++; } } new_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); matrix = matrix * new_matrix; } int checkConvergence_getBlocksize(SparseMatrix& m_residuals, Scalar tolerance_L2, std::vector<int>& columnsToDelete) { // square roots from sum of squares by column int BlockSize = m_nev; Scalar sum, buffer; for (int iCol = 0; iCol < m_nev; iCol++) { sum = 0; for (int iRow = 0; iRow < m_n; iRow++) { buffer = m_residuals.coeff(iRow, iCol); sum += buffer * buffer; } if (sqrt(sum) < tolerance_L2) { BlockSize--; columnsToDelete.push_back(iCol); } } return BlockSize; } public: LOBPCGSolver(const SparseMatrix& A, const SparseMatrix X) : m_n(A.rows()), m_nev(X.cols()), A(A), X(X), flag_with_constraints(false), flag_with_B(false), flag_with_preconditioner(false), m_info(Eigen::InvalidInput) { if (A.rows() != X.rows() || A.rows() != A.cols()) throw std::invalid_argument("Wrong size"); //if (m_n < 5* m_nev) // throw std::invalid_argument("The problem size is small compared to the block size. 
Use standard eigensolver"); } void setConstraints(const SparseMatrix& Y) { m_Y = Y; flag_with_constraints = true; } void setB(const SparseMatrix& B) { if (B.rows() != A.rows() || B.cols() != A.cols()) throw std::invalid_argument("Wrong size"); m_B = B; flag_with_B = true; } void setPreconditioner(const SparseMatrix& preconditioner) { m_preconditioner = preconditioner; flag_with_preconditioner = true; } void compute(int maxit = 10, Scalar tol_div_n = 1e-7) { Scalar tolerance_L2 = tol_div_n * m_n; int BlockSize; int max_iter = std::min(m_n, maxit); SparseMatrix directions, AX, AR, BX, AD, ADD, DD, BDD, BD, XAD, RAD, DAD, XBD, RBD, BR, sparse_eVecX, sparse_eVecR, sparse_eVecD, inverse_matrix; Matrix XAR, RAR, XBR, gramA, gramB, eVecX, eVecR, eVecD; std::vector<int> columnsToDelete; if (flag_with_constraints) { // Apply the constraints Y to X applyConstraintsInPlace(X, m_Y, m_B); } // Make initial vectors orthonormal // implicit BX declaration if (orthogonalizeInPlace(X, m_B, BX) != Eigen::Success) { max_iter = 0; } AX = A * X; // Solve the following NxN eigenvalue problem for all N eigenvalues and -vectors: // first approximation via a dense problem Eigen::EigenSolver<Matrix> eigs(Matrix(X.transpose() * AX)); if (eigs.info() != Eigen::Success) { m_info = eigs.info(); max_iter = 0; } else { m_evalues = eigs.eigenvalues().real(); m_evectors = eigs.eigenvectors().real(); sort_epairs(m_evalues, m_evectors, SortRule::SmallestAlge); sparse_eVecX = m_evectors.sparseView(); X = X * sparse_eVecX; AX = AX * sparse_eVecX; BX = BX * sparse_eVecX; } for (int iter_num = 0; iter_num < max_iter; iter_num++) { m_residuals.resize(m_n, m_nev); for (int i = 0; i < m_nev; i++) { m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); } BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); if (BlockSize == 0) { m_info = Eigen::Success; break; } // substitution of the original active mask if (columnsToDelete.size() > 0) { removeColumns(m_residuals, 
columnsToDelete); if (iter_num > 0) { removeColumns(directions, columnsToDelete); removeColumns(AD, columnsToDelete); removeColumns(BD, columnsToDelete); } columnsToDelete.clear(); // for next iteration } if (flag_with_preconditioner) { // Apply the preconditioner to the residuals m_residuals = m_preconditioner * m_residuals; } if (flag_with_constraints) { // Apply the constraints Y to residuals applyConstraintsInPlace(m_residuals, m_Y, m_B); } if (orthogonalizeInPlace(m_residuals, m_B, BR) != Eigen::Success) { break; } AR = A * m_residuals; // Orthonormalize conjugate directions if (iter_num > 0) { if (orthogonalizeInPlace(directions, m_B, BD, true) != Eigen::Success) { break; } AD = A * directions; } // Perform the Rayleigh Ritz Procedure XAR = Matrix(X.transpose() * AR); RAR = Matrix(m_residuals.transpose() * AR); XBR = Matrix(X.transpose() * BR); if (iter_num > 0) { XAD = X.transpose() * AD; RAD = m_residuals.transpose() * AD; DAD = directions.transpose() * AD; XBD = X.transpose() * BD; RBD = m_residuals.transpose() * BD; gramA = stack_9_matricies(m_evalues.asDiagonal(), XAR, XAD, XAR.transpose(), RAR, RAD, XAD.transpose(), RAD.transpose(), DAD.transpose()); gramB = stack_9_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBD, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize), RBD, XBD.transpose(), RBD.transpose(), Matrix::Identity(BlockSize, BlockSize)); } else { gramA = stack_4_matricies(m_evalues.asDiagonal(), XAR, XAR.transpose(), RAR); gramB = stack_4_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize)); } // Calculate the lowest/largest m eigenpairs; Solve the generalized eigenvalue problem. 
DenseSymMatProd<Scalar> Aop(gramA); DenseCholesky<Scalar> Bop(gramB); SymGEigsSolver<DenseSymMatProd<Scalar>, DenseCholesky<Scalar>, GEigsMode::Cholesky> geigs(Aop, Bop, m_nev, (std::min)(10, int(gramA.rows()) - 1)); geigs.init(); geigs.compute(SortRule::SmallestAlge); // Mat evecs if (geigs.info() == CompInfo::Successful) { m_evalues = geigs.eigenvalues(); m_evectors = geigs.eigenvectors(); sort_epairs(m_evalues, m_evectors, SortRule::SmallestAlge); } else { // Problem With General EgenVec m_info = Eigen::NoConvergence; break; } // Compute Ritz vectors if (iter_num > 0) { eVecX = m_evectors.block(0, 0, m_nev, m_nev); eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); eVecD = m_evectors.block(m_nev + BlockSize, 0, BlockSize, m_nev); sparse_eVecX = eVecX.sparseView(); sparse_eVecR = eVecR.sparseView(); sparse_eVecD = eVecD.sparseView(); DD = m_residuals * sparse_eVecR; // new conjugate directions ADD = AR * sparse_eVecR; BDD = BR * sparse_eVecR; DD = DD + directions * sparse_eVecD; ADD = ADD + AD * sparse_eVecD; BDD = BDD + BD * sparse_eVecD; } else { eVecX = m_evectors.block(0, 0, m_nev, m_nev); eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); sparse_eVecX = eVecX.sparseView(); sparse_eVecR = eVecR.sparseView(); DD = m_residuals * sparse_eVecR; ADD = AR * sparse_eVecR; BDD = BR * sparse_eVecR; } X = X * sparse_eVecX + DD; AX = AX * sparse_eVecX + ADD; BX = BX * sparse_eVecX + BDD; directions = DD; AD = ADD; BD = BDD; } // iteration loop // calculate last residuals m_residuals.resize(m_n, m_nev); for (int i = 0; i < m_nev; i++) { m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); } BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); if (BlockSize == 0) { m_info = Eigen::Success; } } // compute Vector eigenvalues() { return m_evalues; } Matrix eigenvectors() { return m_evectors; } Matrix residuals() { return Matrix(m_residuals); } int info() { return m_info; } }; } // namespace Spectra #endif // 
SPECTRA_LOBPCG_SOLVER_H
18,661
32.807971
222
h
abess
abess-master/include/Spectra/contrib/PartialSVDSolver.h
// Copyright (C) 2018-2021 Yixuan Qiu <yixuan.qiu@cos.name> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at https://mozilla.org/MPL/2.0/. #ifndef SPECTRA_PARTIAL_SVD_SOLVER_H #define SPECTRA_PARTIAL_SVD_SOLVER_H #include <Eigen/Core> #include "../SymEigsSolver.h" namespace Spectra { // Abstract class for matrix operation template <typename Scalar_> class SVDMatOp { public: using Scalar = Scalar_; private: using Index = Eigen::Index; public: virtual Index rows() const = 0; virtual Index cols() const = 0; // y_out = A' * A * x_in or y_out = A * A' * x_in virtual void perform_op(const Scalar* x_in, Scalar* y_out) const = 0; virtual ~SVDMatOp() {} }; // Operation of a tall matrix in SVD // We compute the eigenvalues of A' * A // MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...> template <typename Scalar, typename MatrixType> class SVDTallMatOp : public SVDMatOp<Scalar> { private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using ConstGenericMatrix = const Eigen::Ref<const MatrixType>; ConstGenericMatrix m_mat; const Index m_dim; mutable Vector m_cache; public: // Constructor SVDTallMatOp(ConstGenericMatrix& mat) : m_mat(mat), m_dim((std::min)(mat.rows(), mat.cols())), m_cache(mat.rows()) {} // These are the rows and columns of A' * A Index rows() const override { return m_dim; } Index cols() const override { return m_dim; } // y_out = A' * A * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const override { MapConstVec x(x_in, m_mat.cols()); MapVec y(y_out, m_mat.cols()); m_cache.noalias() = m_mat * x; y.noalias() = m_mat.transpose() * m_cache; } }; // Operation of a wide matrix in SVD // We compute the eigenvalues of A * A' // MatrixType is either Eigen::Matrix<Scalar, ...> or 
Eigen::SparseMatrix<Scalar, ...> template <typename Scalar, typename MatrixType> class SVDWideMatOp : public SVDMatOp<Scalar> { private: using Index = Eigen::Index; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using MapConstVec = Eigen::Map<const Vector>; using MapVec = Eigen::Map<Vector>; using ConstGenericMatrix = const Eigen::Ref<const MatrixType>; ConstGenericMatrix m_mat; const Index m_dim; mutable Vector m_cache; public: // Constructor SVDWideMatOp(ConstGenericMatrix& mat) : m_mat(mat), m_dim((std::min)(mat.rows(), mat.cols())), m_cache(mat.cols()) {} // These are the rows and columns of A * A' Index rows() const override { return m_dim; } Index cols() const override { return m_dim; } // y_out = A * A' * x_in void perform_op(const Scalar* x_in, Scalar* y_out) const override { MapConstVec x(x_in, m_mat.rows()); MapVec y(y_out, m_mat.rows()); m_cache.noalias() = m_mat.transpose() * x; y.noalias() = m_mat * m_cache; } }; // Partial SVD solver // MatrixType is either Eigen::Matrix<Scalar, ...> or Eigen::SparseMatrix<Scalar, ...> template <typename MatrixType = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> class PartialSVDSolver { private: using Scalar = typename MatrixType::Scalar; using Index = Eigen::Index; using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>; using Vector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>; using ConstGenericMatrix = const Eigen::Ref<const MatrixType>; ConstGenericMatrix m_mat; const Index m_m; const Index m_n; SVDMatOp<Scalar>* m_op; SymEigsSolver<SVDMatOp<Scalar>>* m_eigs; Index m_nconv; Matrix m_evecs; public: // Constructor PartialSVDSolver(ConstGenericMatrix& mat, Index ncomp, Index ncv) : m_mat(mat), m_m(mat.rows()), m_n(mat.cols()), m_evecs(0, 0) { // Determine the matrix type, tall or wide if (m_m > m_n) { m_op = new SVDTallMatOp<Scalar, MatrixType>(mat); } else { m_op = new SVDWideMatOp<Scalar, MatrixType>(mat); } // Solver object m_eigs = new SymEigsSolver<SVDMatOp<Scalar>>(*m_op, ncomp, 
ncv); } // Destructor virtual ~PartialSVDSolver() { delete m_eigs; delete m_op; } // Computation Index compute(Index maxit = 1000, Scalar tol = 1e-10) { m_eigs->init(); m_nconv = m_eigs->compute(SortRule::LargestAlge, maxit, tol); return m_nconv; } // The converged singular values Vector singular_values() const { Vector svals = m_eigs->eigenvalues().cwiseSqrt(); return svals; } // The converged left singular vectors Matrix matrix_U(Index nu) { if (m_evecs.cols() < 1) { m_evecs = m_eigs->eigenvectors(); } nu = (std::min)(nu, m_nconv); if (m_m <= m_n) { return m_evecs.leftCols(nu); } return m_mat * (m_evecs.leftCols(nu).array().rowwise() / m_eigs->eigenvalues().head(nu).transpose().array().sqrt()).matrix(); } // The converged right singular vectors Matrix matrix_V(Index nv) { if (m_evecs.cols() < 1) { m_evecs = m_eigs->eigenvectors(); } nv = (std::min)(nv, m_nconv); if (m_m > m_n) { return m_evecs.leftCols(nv); } return m_mat.transpose() * (m_evecs.leftCols(nv).array().rowwise() / m_eigs->eigenvalues().head(nv).transpose().array().sqrt()).matrix(); } }; } // namespace Spectra #endif // SPECTRA_PARTIAL_SVD_SOLVER_H
5,921
26.933962
145
h
abess
abess-master/python/setup.py
import os import re import sys import distutils import subprocess # import platform from setuptools import Extension, setup, find_packages from setuptools.command.build_ext import build_ext CURRENT_DIR = os.path.abspath(os.path.dirname(__file__)) PLAT_TO_CMAKE = { "win32": "Win32", "win-amd64": "x64", "win-arm32": "ARM", "win-arm64": "ARM64", } class CMakeExtension(Extension): def __init__(self, name, sourcedir=""): Extension.__init__(self, name, sources=[]) self.sourcedir = os.path.abspath(sourcedir) self.parallel = 4 class CMakeBuild(build_ext): def build_extension(self, ext): extdir = os.path.abspath( os.path.dirname( self.get_ext_fullpath( ext.name))) # required for auto-detection & inclusion of auxiliary "native" libs if not extdir.endswith(os.path.sep): extdir += os.path.sep debug = int( os.environ.get( "DEBUG", 0)) if self.debug is None else self.debug cfg = "Debug" if debug else "Release" # CMake lets you override the generator - we need to check this. # Can be set with Conda-Build, for example. cmake_generator = os.environ.get("CMAKE_GENERATOR", "") cmake_args = [ f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}", f"-DPYTHON_EXECUTABLE={sys.executable}", f"-DCMAKE_BUILD_TYPE={cfg}" ] build_args = [] # Adding CMake arguments set as environment variable # (needed e.g. to build for ARM OSx on conda-forge) if "CMAKE_ARGS" in os.environ: cmake_args += [ item for item in os.environ["CMAKE_ARGS"].split(" ") if item] if self.compiler.compiler_type != "msvc": # Using Ninja-build since it a) is available as a wheel and b) # multithreads automatically. MSVC would require all variables be # exported for Ninja to pick it up, which is a little tricky to do. # Users can override the generator with CMAKE_GENERATOR in CMake # 3.15+. 
if not cmake_generator: try: import ninja # noqa: F401 cmake_args += ["-GNinja"] except ImportError: pass else: cmake_args += ["-DMSVC=ON"] # Single config generators are handled "normally" single_config = any( x in cmake_generator for x in { "NMake", "Ninja"}) # CMake allows an arch-in-generator style for backward # compatibility contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"}) # Specify the arch if using MSVC generator, but only if it doesn't # contain a backward-compatibility arch spec already in the # generator name. if not single_config and not contains_arch: cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]] # Multi-config generators have a different way to specify configs if not single_config: cmake_args += [ f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}" ] build_args += ["--config", cfg] if sys.platform.startswith("darwin"): cmake_args += ["-DDARWIN=ON"] # Cross-compile support for macOS - respect ARCHFLAGS if set archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", "")) if archs: cmake_args += [ "-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))] # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level # across all generators. if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ: # self.parallel is a Python 3 only way to set parallel jobs by hand # using -j in the build_ext call, not supported by pip or # PyPA-build. if hasattr(self, "parallel") and self.parallel: # CMake 3.12+ only. 
build_args += [f"-j{self.parallel}"] build_temp = os.path.join(self.build_temp, ext.name) if not os.path.exists(build_temp): os.makedirs(build_temp) subprocess.check_call(["cmake", ext.sourcedir] + cmake_args, cwd=build_temp) subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=build_temp) def get_info(): # get information from `__init__.py` labels = ["__version__", "__author__"] values = ["" for _ in labels] with open(os.path.join(CURRENT_DIR, "abess/__init__.py")) as f: for line in f.read().splitlines(): for i, label in enumerate(labels): if line.startswith(label): values[i] = line.split('"')[1] break if "" not in values: break with open(os.path.join(CURRENT_DIR, 'README.rst'), encoding='utf-8') as f: s = f.read() labels.append("long_description") values.append(s) return dict(zip(labels, values)) def copy_src(): # copy files from parent dir need_clean_tree = set() try: src_dir = os.path.join(CURRENT_DIR, os.path.pardir) dst = os.path.join(CURRENT_DIR, 'src') src = os.path.join(src_dir, 'src') distutils.dir_util.copy_tree(src, dst) need_clean_tree.add(os.path.abspath(dst)) dst = os.path.join(CURRENT_DIR, 'include') src = os.path.join(src_dir, 'include') distutils.dir_util.copy_tree(src, dst) need_clean_tree.add(os.path.abspath(dst)) except BaseException: pass return need_clean_tree # print("sys.platform output: {}".format(sys.platform)) # print("platform.processor() output: {}".format(platform.processor())) need_clean_tree = copy_src() package_info = get_info() setup( name='abess', version=package_info['__version__'], author=package_info['__author__'], author_email="zhuj37@mail2.sysu.edu.cn", maintainer="Junhao Huang", maintainer_email="huangjh256@mail2.sysu.edu.cn", # package_dir={'': CURRENT_DIR}, packages=find_packages(), description="abess: Fast Best Subset Selection", long_description=package_info['long_description'], long_description_content_type="text/x-rst", install_requires=[ "numpy", "pandas", "scipy", "scikit-learn>=0.24" ], 
license="GPL-3", url="https://abess.readthedocs.io", download_url="https://pypi.python.org/pypi/abess", project_urls={ "Bug Tracker": "https://github.com/abess-team/abess/issues", "Documentation": "https://abess.readthedocs.io", "Source Code": "https://github.com/abess-team/abess", }, classifiers=[ "Intended Audience :: Science/Research", "Intended Audience :: Developers", "Programming Language :: C++", "Programming Language :: Python", "Topic :: Software Development", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Mathematics", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX :: Linux", "Operating System :: MacOS", "Programming Language :: Python", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", ], python_requires='>=3.6', ext_modules=[CMakeExtension("abess.pybind_cabess")], cmdclass={"build_ext": CMakeBuild} )
7,986
34.816143
79
py
abess
abess-master/python/abess/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : # @Author : # @Site : # @File : __init__.py __version__ = "0.4.7rc1" __author__ = ("Jin Zhu, Kangkang Jiang, " "Junhao Huang, Yanhang Zhang, " "Yanhang Zhang, Shiyun Lin, " "Junxian Zhu, Xueqin Wang") from .linear import ( LinearRegression, LogisticRegression, CoxPHSurvivalAnalysis, PoissonRegression, MultiTaskRegression, MultinomialRegression, GammaRegression, OrdinalRegression ) from .decomposition import (SparsePCA, RobustPCA) from .datasets import (make_glm_data, make_multivariate_glm_data) # To be deprecated in version 0.6.0 from .linear import ( # noqa abessLm, abessLogistic, abessCox, abessPoisson, abessMultigaussian, abessMultinomial, abessGamma ) from .pca import (abessPCA, abessRPCA) # noqa __all__ = [ # linear "LinearRegression", "LogisticRegression", "CoxPHSurvivalAnalysis", "PoissonRegression", "MultiTaskRegression", "MultinomialRegression", "GammaRegression", "OrdinalRegression", # decomposition "SparsePCA", "RobustPCA", # datasets "make_glm_data", "make_multivariate_glm_data" ]
1,222
22.980392
65
py
abess
abess-master/python/abess/bess_base.py
import numbers import warnings import numpy as np import pandas as pd from scipy.sparse import coo_matrix, csr_matrix from sklearn.base import BaseEstimator from sklearn.utils.validation import check_X_y from sklearn.exceptions import DataConversionWarning from .pybind_cabess import pywrap_GLM from .utilities import categorical_to_dummy class bess_base(BaseEstimator): r""" Parameters ---------- path_type : {"seq", "gs"}, optional, default="seq" The method to be used to select the optimal support size. - For path_type = "seq", we solve the best subset selection problem for each size in support_size. - For path_type = "gs", we solve the best subset selection problem with support size ranged in (s_min, s_max), where the specific support size to be considered is determined by golden section. support_size : array-like, optional default=range(min(n, int(n/(log(log(n))log(p))))). An integer vector representing the alternative support sizes. Only used when path_type = "seq". s_min : int, optional, default=0 The lower bound of golden-section-search for sparsity searching. s_max : int, optional, default=min(n, int(n/(log(log(n))log(p)))). The higher bound of golden-section-search for sparsity searching. group : int, optional, default=np.ones(p) The group index for each variable. alpha : float, optional, default=0 Constant that multiples the L2 term in loss function, controlling regularization strength. It should be non-negative. - If alpha = 0, it indicates ordinary least square. fit_intercept : bool, optional, default=True Whether to consider intercept in the model. We assume that the data has been centered if fit_intercept=False. ic_type : {'aic', 'bic', 'gic', 'ebic', 'loss'}, optional, default='ebic' The type of criterion for choosing the support size if `cv=1`. ic_coef : float, optional, default=1.0 Constant that controls the regularization strength on chosen information criterion. cv : int, optional, default=1 The folds number when use the cross-validation method. 
- If cv=1, cross-validation would not be used. - If cv>1, support size will be chosen by CV's test loss, instead of IC. cv_score : {'test_loss', ...}, optional, default='test_loss' The score used on test data for CV. - All methods support {'test_loss'}. - LogisticRegression also supports {'roc_auc'}. - MultinomialRegression also supports {'roc_auc_ovo', 'roc_auc_ovr'}, which indicate "One vs One/Rest" algorithm, respectively. thread : int, optional, default=1 Max number of multithreads. - If thread = 0, the maximum number of threads supported by the device will be used. A_init : array-like, optional, default=None Initial active set before the first splicing. always_select : array-like, optional, default=None An array contains the indexes of variables we want to consider in the model. For group selection, it should be the indexes of groups (start from 0). max_iter : int, optional, default=20 Maximum number of iterations taken for the splicing algorithm to converge. Due to the limitation of loss reduction, the splicing algorithm must be able to converge. The number of iterations is only to simplify the implementation. is_warm_start : bool, optional, default=True When tuning the optimal parameter combination, whether to use the last solution as a warm start to accelerate the iterative convergence of the splicing algorithm. screening_size : int, optional, default=-1 The number of variables remaining after screening. It should be a non-negative number smaller than p, but larger than any value in support_size. - If screening_size=-1, screening will not be used. - If screening_size=0, screening_size will be set as :math:`\\min(p, int(n / (\\log(\\log(n))\\log(p))))`. primary_model_fit_max_iter : int, optional, default=10 The maximal number of iteration for primary_model_fit. primary_model_fit_epsilon : float, optional, default=1e-08 The epsilon (threshold) of iteration for primary_model_fit. 
Attributes ---------- coef_ : array-like, shape(p_features, ) or (p_features, M_responses) Estimated coefficients for the best subset selection problem. intercept_ : float or array-like, shape(M_responses,) The intercept in the model when fit_intercept=True. train_loss_ : float The loss on training data. eval_loss_ : float - If cv=1, it stores the score under chosen information criterion. - If cv>1, it stores the test loss under cross-validation. References ---------- - Junxian Zhu, Canhong Wen, Jin Zhu, Heping Zhang, and Xueqin Wang. A polynomial algorithm for best-subset selection problem. Proceedings of the National Academy of Sciences, 117(52):33117-33123, 2020. """ # attributes coef_ = None intercept_ = None train_loss_ = 0 eval_loss_ = 0 def __init__( self, algorithm_type, model_type, normalize_type, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, fit_intercept=True, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, important_search=0, screening_size=-1, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, approximate_Newton=False, covariance_update=False, # lambda_min=None, lambda_max=None, # early_stop=False, n_lambda=100, baseline_model=None, _estimator_type=None ): self.algorithm_type = algorithm_type self.model_type = model_type self.normalize_type = normalize_type self.path_type = path_type self.max_iter = max_iter self.exchange_num = exchange_num self.is_warm_start = is_warm_start self.support_size = support_size self.alpha = alpha self.fit_intercept = fit_intercept self.n_features_in_: int self.n_iter_: int self.s_min = s_min self.s_max = s_max self.A_init = A_init self.group = group # self.lambda_min = None # self.lambda_max = None # self.n_lambda = 100 self.ic_type = ic_type self.ic_coef = ic_coef self.cv = cv self.cv_score = cv_score self.screening_size = screening_size self.always_select 
= always_select self.primary_model_fit_max_iter = primary_model_fit_max_iter self.primary_model_fit_epsilon = primary_model_fit_epsilon # self.early_stop = False self.approximate_Newton = approximate_Newton self.thread = thread self.covariance_update = covariance_update self.splicing_type = splicing_type self.important_search = important_search self.baseline_model = baseline_model self._estimator_type = _estimator_type self.classes_: np.ndarray def fit(self, X=None, y=None, is_normal=True, sample_weight=None, cv_fold_id=None, sparse_matrix=False): r""" The fit function is used to transfer the information of data and return the fit result. Parameters ---------- X : array-like of shape(n_samples, p_features) Training data matrix. It should be a numpy array. y : array-like of shape(n_samples,) or (n_samples, M_responses) Training response values. It should be a numpy array. - For regression problem, the element of y should be float. - For classification problem, the element of y should be either 0 or 1. In multinomial regression, the p features are actually dummy variables. - For survival data, y should be a :math:`n \times 2` array, where the columns indicates "censoring" and "time", respectively. is_normal : bool, optional, default=True whether normalize the variables array before fitting the algorithm. sample_weight : array-like, shape (n_samples,), optional Individual weights for each sample. Only used for is_weight=True. Default=np.ones(n). cv_fold_id : array-like, shape (n_samples,), optional, default=None An array indicates different folds in CV. Samples in the same fold should be given the same number. sparse_matrix : bool, optional, default=False Set as True to treat X as sparse matrix during fitting. It would be automatically set as True when X has the sparse matrix type defined in scipy.sparse. 
""" # Check that X and y have correct shape X, y = check_X_y(X, y, accept_sparse=True, multi_output=True, # y_numeric=True, dtype='numeric') # Input check & init: if isinstance(X, (list, np.ndarray, np.matrix, pd.DataFrame, coo_matrix, csr_matrix)): if isinstance(X, (coo_matrix, csr_matrix)): sparse_matrix = True # Sort for Cox if self.model_type == "Cox": X = X[y[:, 0].argsort()] y = y[y[:, 0].argsort()] time = y[:, 0].reshape(-1) y = y[:, 1].reshape(-1) # Dummy y & classes if self.model_type == "Logistic": y, self.classes_ = categorical_to_dummy(y.squeeze()) if self.classes_.size > 2: raise ValueError("Up to 2 classes can be given in y.") if self.classes_.size == 1: y = np.zeros(X.shape[0]) else: y = y[:, 1] elif (self.model_type in ("Multinomial", "Ordinal") and (len(y.shape) == 1 or y.shape[1] == 1)): y, self.classes_ = categorical_to_dummy(y.squeeze()) if self.classes_.size == 1: # add a useless label y = np.hstack((np.zeros((X.shape[0], 1)), y)) self.classes_ = np.insert(self.classes_, 0, 0) # multi_output warning if self.model_type in ( 'Lm', 'Logistic', 'Poisson', 'Gamma'): if len(y.shape) > 1: warnings.warn( "A column-vector y was passed " "when a 1d array was expected", DataConversionWarning) y = y.reshape(-1) # Init n = X.shape[0] p = X.shape[1] self.n_features_in_ = p if y.ndim == 1: M = 1 y = y.reshape(len(y), 1) else: M = y.shape[1] else: raise ValueError("X should be a matrix or sparse matrix.") # Algorithm_type: abess if self.algorithm_type == "abess": algorithm_type_int = 6 else: raise ValueError("algorithm_type should not be " + str(self.algorithm_type)) # Model_type: lm, logit, poiss, cox, multi-gaussian, multi-nomial if self.model_type == "Lm": model_type_int = 1 elif self.model_type == "Logistic": model_type_int = 2 elif self.model_type == "Poisson": model_type_int = 3 elif self.model_type == "Cox": model_type_int = 4 elif self.model_type == "Multigaussian": model_type_int = 5 elif self.model_type == "Multinomial": model_type_int = 6 elif 
self.model_type == 'Gamma': model_type_int = 8 elif self.model_type == 'Ordinal': model_type_int = 9 else: raise ValueError("model_type should not be " + str(self.model_type)) # Path_type: seq, gs if self.path_type == "seq": path_type_int = 1 elif self.path_type == "gs": path_type_int = 2 else: raise ValueError("path_type should be \'seq\' or \'gs\'") # cv if (not isinstance(self.cv, int) or self.cv <= 0): raise ValueError("cv should be an positive integer.") if self.cv > n: raise ValueError("cv should be smaller than n.") # Ic_type: aic, bic, gic, ebic # cv_score: test_loss, roc_auc if self.cv == 1: if self.ic_type == "loss": eval_type_int = 0 elif self.ic_type == "aic": eval_type_int = 1 elif self.ic_type == "bic": eval_type_int = 2 elif self.ic_type == "gic": eval_type_int = 3 elif self.ic_type == "ebic": eval_type_int = 4 elif self.ic_type == "hic": eval_type_int = 5 else: raise ValueError( "ic_type should be \"aic\", \"bic\", \"ebic\"," " \"gic\" or \"hic\".") else: if self.cv_score == "test_loss": eval_type_int = 0 elif self.cv_score == "roc_auc" and self.model_type == "Logistic": eval_type_int = 1 elif (self.cv_score == "roc_auc_ovo" and self.model_type == "Multinomial"): eval_type_int = 2 elif (self.cv_score == "roc_auc_ovr" and self.model_type == "Multinomial"): eval_type_int = 3 else: raise ValueError( "cv_score should be \"test_loss\", " "\"roc_auc\"(for logistic), " "\"roc_auc_ovo\"(for multinomial), or " "\"roc_auc_ovr\"(for multinomial).") # cv_fold_id if cv_fold_id is None: cv_fold_id = np.array([], dtype="int32") else: cv_fold_id = np.array(cv_fold_id, dtype="int32") if cv_fold_id.ndim > 1: raise ValueError( "cv_fold_id should be an 1D array of integers.") if cv_fold_id.size != n: raise ValueError( "The length of cv_fold_id should be equal to X.shape[0].") if len(set(cv_fold_id)) != self.cv: raise ValueError( "The number of different masks should be equal to `cv`.") # A_init if self.A_init is None: A_init_list = np.array([], dtype="int32") else: 
A_init_list = np.array(self.A_init, dtype="int32") if A_init_list.ndim > 1: raise ValueError("The initial active set should be " "an 1D array of integers.") if (A_init_list.min() < 0 or A_init_list.max() >= p): raise ValueError("A_init contains out-of-range index.") # Group: if self.group is None: g_index = list(range(p)) else: g = np.array(self.group) if g.ndim > 1: raise ValueError("group should be an 1D array of integers.") if g.size != p: raise ValueError( "The length of group should be equal to X.shape[1].") group_set = list(set(g)) g.sort() g_index = [] j = 0 for i in group_set: while g[j] != i: j += 1 g_index.append(j) # sample_weight: if sample_weight is None: sample_weight = np.ones(n) else: sample_weight = np.array(sample_weight, dtype="float") if sample_weight.ndim > 1: raise ValueError("sample_weight should be a 1-D array.") if sample_weight.size != n: raise ValueError( "X.shape[0] should be equal to sample_weight.size") useful_index = list() for i, w in enumerate(sample_weight): if w > 0: useful_index.append(i) if len(useful_index) < n: X = X[useful_index, :] y = y[useful_index, :] if len(y.shape) > 1 else y[useful_index] sample_weight = sample_weight[useful_index] n = len(useful_index) # Path parameters if path_type_int == 1: # seq if self.support_size is None: if (n == 1 or p == 1): support_sizes = [0, 1] else: support_sizes = list( range(0, max(min( p, int(n / (np.log(np.log(n)) * np.log(p))) ), 1))) else: if isinstance(self.support_size, (numbers.Real, numbers.Integral)): support_sizes = np.empty(1, dtype=int) support_sizes[0] = self.support_size elif (np.any(np.array(self.support_size) > p) or np.any(np.array(self.support_size) < 0)): raise ValueError( "All support_size should be between 0 and X.shape[1]") else: support_sizes = self.support_size if self.alpha is None: alphas = [0] else: if isinstance(self.alpha, (numbers.Real, numbers.Integral)): alphas = np.empty(1, dtype=float) alphas[0] = self.alpha else: alphas = self.alpha # unused new_s_min 
= 0 new_s_max = 0 new_lambda_min = 0 new_lambda_max = 0 elif path_type_int == 2: # gs new_s_min = 0 \ if self.s_min is None else self.s_min new_s_max = min(p, int(n / (np.log(np.log(n)) * np.log(p)))) \ if self.s_max is None else self.s_max new_lambda_min = 0 # \ # if self.lambda_min is None else self.lambda_min new_lambda_max = 0 # \ # if self.lambda_max is None else self.lambda_max if new_s_max < new_s_min: raise ValueError("s_max should be larger than s_min") # if new_lambda_max < new_lambda_min: # raise ValueError( # "lambda_max should be larger than lambda_min.") # unused support_sizes = [0] alphas = [0] support_sizes = np.array(support_sizes, dtype='int32') # Exchange_num if (not isinstance(self.exchange_num, int) or self.exchange_num <= 0): raise ValueError("exchange_num should be an positive integer.") # elif (self.exchange_num > min(support_sizes)): # print("[Warning] exchange_num may be larger than sparsity, " # "and it would be set up to sparsity.") # screening if self.screening_size != -1: if self.screening_size == 0: self.screening_size = min( p, int(n / (np.log(np.log(n)) * np.log(p)))) elif self.screening_size > p: raise ValueError( "screening size should be smaller than X.shape[1].") elif self.screening_size < max(support_sizes): raise ValueError( "screening size should be more than max(support_size).") # Primary fit parameters if (not isinstance(self.primary_model_fit_max_iter, int) or self.primary_model_fit_max_iter <= 0): raise ValueError( "primary_model_fit_max_iter should be an positive integer.") if self.primary_model_fit_epsilon < 0: raise ValueError( "primary_model_fit_epsilon should be non-negative.") # Thread if (not isinstance(self.thread, int) or self.thread < 0): raise ValueError("thread should be positive number or 0" " (maximum supported by your device).") # Splicing type if self.splicing_type not in (0, 1): raise ValueError("splicing type should be 0 or 1.") # Important_search if (not isinstance(self.important_search, int) or 
self.important_search < 0): raise ValueError( "important_search should be a non-negative number.") # Sparse X if sparse_matrix: if not isinstance(X, (coo_matrix)): # print("sparse matrix 1") nonzero = 0 tmp = np.zeros([X.shape[0] * X.shape[1], 3]) for j in range(X.shape[1]): for i in range(X.shape[0]): if X[i, j] != 0.: tmp[nonzero, :] = np.array([X[i, j], i, j]) nonzero += 1 X = tmp[:nonzero, :] else: # print("sparse matrix 2") tmp = np.zeros([len(X.data), 3]) tmp[:, 1] = X.row tmp[:, 2] = X.col tmp[:, 0] = X.data ind = np.lexsort((tmp[:, 2], tmp[:, 1])) X = tmp[ind, :] # normalize normalize = 0 if is_normal: normalize = self.normalize_type # always_select if self.always_select is None: always_select_list = np.zeros(0, dtype="int32") else: always_select_list = np.array(self.always_select, dtype="int32") # unused n_lambda = 100 early_stop = False self.n_iter_ = self.max_iter # wrap with cpp # print("wrap enter.")#/// if n == 1: # with only one sample, nothing to be estimated result = [np.zeros((p, M)), np.zeros(M), 0, 0, 0] else: result = pywrap_GLM( X, y, sample_weight, n, p, normalize, algorithm_type_int, model_type_int, self.max_iter, self.exchange_num, path_type_int, self.is_warm_start, eval_type_int, self.ic_coef, self.cv, g_index, support_sizes, alphas, cv_fold_id, new_s_min, new_s_max, new_lambda_min, new_lambda_max, n_lambda, self.screening_size, always_select_list, self.primary_model_fit_max_iter, self.primary_model_fit_epsilon, early_stop, self.approximate_Newton, self.thread, self.covariance_update, sparse_matrix, self.splicing_type, self.important_search, A_init_list, self.fit_intercept) self.coef_ = result[0].squeeze() self.intercept_ = result[1].squeeze() self.train_loss_ = result[2] # self.test_loss_ = result[3] # self.ic_ = result[4] self.eval_loss_ = result[3] if (self.cv > 1) else result[4] if self.model_type == "Cox": self.baseline_model.fit(np.dot(X, self.coef_), y, time) if self.model_type == "Ordinal" and self.coef_.ndim > 1: self.coef_ = 
self.coef_[:, 0] return self
24,190
37.7056
79
py
abess
abess-master/python/abess/datasets.py
import numpy as np def sample(p, k): full = np.arange(p) select = sorted(np.random.choice(full, k, replace=False)) return select def sparse_beta_generator(p, Nonzero, k, M): Tbeta = np.zeros([p, M]) beta_value = beta_generator(k, M) Tbeta[Nonzero, :] = beta_value return Tbeta def beta_generator(k, M): # # strong_num <- 3 # # moderate_num <- 7 # # weak_num <- 5 # # strong_num <- 10 # # moderate_num <- 10 # # weak_num <- 10 strong_num = int(k * 0.3) moderate_num = int(k * 0.4) weak_num = k - strong_num - moderate_num # signal_num = strong_num + moderate_num + weak_num strong_signal = np.random.normal( 0, 10, strong_num * M).reshape(strong_num, M) moderate_signal = np.random.normal( 0, 5, moderate_num * M).reshape(moderate_num, M) weak_signal = np.random.normal(0, 2, weak_num * M).reshape(weak_num, M) beta_value = np.concatenate((strong_signal, moderate_signal, weak_signal)) beta_value = beta_value[sample(k, k), :] # beta_value = np.random.normal(size=(k, M)) return beta_value class make_glm_data: r""" Generate a dataset with single response. Parameters ---------- n: int The number of observations. p: int The number of predictors of interest. k: int The number of nonzero coefficients in the underlying regression model. family: {gaussian, binomial, poisson, gamma, cox} The distribution of the simulated response. "gaussian" for univariate quantitative response, "binomial" for binary classification response, "poisson" for counting response, "gamma" for positive continuous response, "cox" for left-censored response. rho: float, optional, default=0 A parameter used to characterize the pairwise correlation in predictors. corr_type: string, optional, default="const" The structure of correlation matrix. "const" for constant pairwise correlation, "exp" for pairwise correlation with exponential decay. sigma: float, optional, default=1 The variance of the gaussian noise. It would be unused if snr is not None. 
coef_: array_like, optional, default=None The coefficient values in the underlying regression model. censoring: bool, optional, default=True For Cox data, it indicates whether censoring is existed. c: int, optional, default=1 For Cox data and censoring=True, it indicates the maximum censoring time. So that all observations have chances to be censored at (0, c). scal: float, optional, default=10 The scale of survival time in Cox data. snr: float, optional, default=None A numerical value controlling the signal-to-noise ratio (SNR) in gaussian data. class_num: int, optional, default=3 The number of possible classes in oridinal dataset, i.e. :math:`y \in \{0, 1, 2, ..., \text{class_num}-1\}` Attributes ---------- x: array-like, shape(n, p) Design matrix of predictors. y: array-like, shape(n,) Response variable. coef_: array-like, shape(p,) The coefficients used in the underlying regression model. It has k nonzero values. Notes ----- The output, whose type is named ``data``, contains three elements: ``x``, ``y`` and ``coef_``, which correspond the variables, responses and coefficients, respectively. Each row of ``x`` or ``y`` indicates a sample and is independent to the other. We denote :math:`x, y, \beta` for one sample in the math formulas below. * Linear Regression * Usage: ``family='gaussian'[, sigma=...]`` * Model: :math:`y \sim N(\mu, \sigma^2),\ \mu = x^T\beta`. * the coefficient :math:`\beta\sim U[m, 100m]`, where :math:`m = 5\sqrt{2\log p/n}`; * the variance :math:`\sigma = 1`. * Logistic Regression * Usage: ``family='binomial'`` * Model: :math:`y \sim \text{Binom}(\pi),\ \text{logit}(\pi) = x^T \beta`. * the coefficient :math:`\beta\sim U[2m, 10m]`, where :math:`m = 5\sqrt{2\log p/n}`. * Poisson Regression * Usage: ``family='poisson'`` * Model: :math:`y \sim \text{Poisson}(\lambda),\ \lambda = \exp(x^T \beta)`. * the coefficient :math:`\beta\sim U[2m, 10m]`, where :math:`m = 5\sqrt{2\log p/n}`. 
* Gamma Regression * Usage: ``family='gamma'`` * Model: :math:`y \sim \text{Gamma}(k, \theta),\ k\theta = -1/(x^T \beta + \epsilon), k\sim U[0.1, 100.1]` in shape-scale definition. * the coefficient :math:`\beta\sim U[m, 100m]`, where :math:`m = 5\sqrt{2\log p/n}`. * Cox PH Survival Analysis * Usage: ``family='cox'[, scal=..., censoring=..., c=...]`` * Model: :math:`y=\min(t,C)`, where :math:`t = \left[-\dfrac{\log U}{\exp(X \beta)}\right]^s,\ U\sim N(0,1),\ s=\dfrac{1}{\text{scal}}` and censoring time :math:`C\sim U(0, c)`. * the coefficient :math:`\beta\sim U[2m, 10m]`, where :math:`m = 5\sqrt{2\log p/n}`; * the scale of survival time :math:`\text{scal} = 10`; * censoring is enabled, and max censoring time :math:`c=1`. * Ordinal Regression * Usage: ``family='ordinal'[, class_num=...]`` * Model: :math:`y\in \{0, 1, \dots, n_{class}\}`, :math:`\mathbb{P}(y\leq i) = \dfrac{1} {1+\exp(-x^T\beta - \varepsilon_i)}`, where :math:`i\in \{0, 1, \dots, n_{class}\}` and :math:`\forall i<j, \varepsilon_i < \varepsilon_j`. * the coefficient :math:`\beta\sim U[-M, M]`, where :math:`M = 125\sqrt{2\log p/n}`; * the intercept: :math:`\forall i,\varepsilon_i\sim U[-M, M]`; * the number of classes :math:`n_{class}=3`. 
""" def __init__(self, n, p, k, family, rho=0, corr_type="const", sigma=1, coef_=None, censoring=True, c=1, scal=10, snr=None, class_num=3): self.n = n self.p = p self.k = k self.family = family if corr_type == "exp": # generate correlation matrix with exponential decay R = np.zeros((p, p)) for i in range(p): for j in range(i, p): R[i, j] = rho ** abs(i - j) R = R + R.T - np.identity(p) elif corr_type == "const": # generate correlation matrix with constant correlation R = np.ones((p, p)) * rho for i in range(p): R[i, i] = 1 else: raise ValueError( "corr_type should be \'const\' or \'exp\'") x = np.random.multivariate_normal(mean=np.zeros(p), cov=R, size=(n,)) nonzero = sample(p, k) Tbeta = np.zeros(p) sign = np.random.choice([1, -1], k) if family == "gaussian": m = 5 * np.sqrt(2 * np.log(p) / n) M = 100 * m if coef_ is None: Tbeta[nonzero] = np.random.uniform(m, M, k) * sign else: Tbeta = coef_ if snr is None: y = np.matmul(x, Tbeta) + sigma * np.random.normal(0, 1, n) else: y = np.matmul(x, Tbeta) power = np.mean(np.square(y)) npower = power / 10 ** (snr / 10) noise = np.random.randn(len(y)) * np.sqrt(npower) y += noise elif family == "binomial": m = 5 * sigma * np.sqrt(2 * np.log(p) / n) if coef_ is None: Tbeta[nonzero] = np.random.uniform(2 * m, 10 * m, k) * sign else: Tbeta = coef_ xbeta = np.matmul(x, Tbeta) xbeta[xbeta > 30] = 30 xbeta[xbeta < -30] = -30 p = np.exp(xbeta) / (1 + np.exp(xbeta)) y = np.random.binomial(1, p) elif family == "poisson": x = x / 16 m = 5 * sigma * np.sqrt(2 * np.log(p) / n) if coef_ is None: Tbeta[nonzero] = np.random.uniform(2 * m, 10 * m, k) * sign # Tbeta[nonzero] = np.random.normal(0, 4*m, k) else: Tbeta = coef_ xbeta = np.matmul(x, Tbeta) xbeta[xbeta > 30] = 30 xbeta[xbeta < -30] = -30 lam = np.exp(xbeta) y = np.random.poisson(lam=lam) elif family == "cox": m = 5 * sigma * np.sqrt(2 * np.log(p) / n) if coef_ is None: Tbeta[nonzero] = np.random.uniform(2 * m, 10 * m, k) * sign else: Tbeta = coef_ time = 
np.power(-np.log(np.random.uniform(0, 1, n)) / np.exp(np.matmul(x, Tbeta)), 1 / scal) if censoring: ctime = c * np.random.uniform(0, 1, n) status = (time < ctime) * 1 censoringrate = 1 - sum(status) / n print("censoring rate:" + str(censoringrate)) for i in range(n): time[i] = min(time[i], ctime[i]) else: status = np.ones(n) print("no censoring") y = np.hstack((time.reshape((-1, 1)), status.reshape((-1, 1)))) elif family == "gamma": x = x / 16 m = 5 * np.sqrt(2 * np.log(p) / n) if coef_ is None: Tbeta[nonzero] = np.random.uniform(m, 100 * m, k) * sign else: Tbeta = coef_ # add noise eta = x @ Tbeta + np.random.normal(0, sigma, n) # set coef_0 to make eta<0 eta = eta - np.abs(np.max(eta)) - 10 eta = -1 / eta # set the shape para of gamma uniformly in [0.1,100.1] shape_para = 100 * np.random.uniform(0, 1, n) + 0.1 y = np.random.gamma( shape=shape_para, scale=eta / shape_para, size=n) elif family == "ordinal": M = 125 * np.sqrt(2 * np.log(p) / n) if coef_ is None: Tbeta[nonzero] = np.random.uniform(-M, M, k) else: Tbeta = coef_ intercept = np.sort(np.random.uniform(-M, M, class_num - 1)) eta = x @ Tbeta[:, np.newaxis] + intercept logit = 1 / (1 + np.exp(-eta)) # prob prob = np.zeros((n, class_num)) prob[:, 0] = logit[:, 0] prob[:, 1:class_num - 1] = (logit[:, 1:class_num - 1] - logit[:, 0:class_num - 2]) prob[:, class_num - 1] = 1 - logit[:, class_num - 2] # y y = np.zeros(n) for i in range(n): y[i] = np.random.choice(np.arange(class_num), 1, p=prob[i, :]) else: raise ValueError( "Family should be \'gaussian\', \'binomial\', " "\'poisson\', \'gamma\', \'cox\', or \'ordinal\'.") self.x = x self.y = y self.coef_ = Tbeta class make_multivariate_glm_data: r""" Generate a dataset with multi-responses. Parameters ---------- n: int, optional, default=100 The number of observations. p: int, optional, default=100 The number of predictors of interest. family: {multigaussian, multinomial, poisson}, optional default="multigaussian". 
The distribution of the simulated multi-response. "multigaussian" for multivariate quantitative responses, "multinomial" for multiple classification responses, "poisson" for counting responses. k: int, optional, default=10 The number of nonzero coefficients in the underlying regression model. M: int, optional, default=1 The number of responses. rho: float, optional, default=0.5 A parameter used to characterize the pairwise correlation in predictors. corr_type: string, optional, default="const" The structure of correlation matrix. "const" for constant pairwise correlation, "exp" for pairwise correlation with exponential decay. coef_: array_like, optional, default=None The coefficient values in the underlying regression model. sparse_ratio: float, optional, default=None The sparse ratio of predictor matrix (x). Attributes ---------- x: array-like, shape(n, p) Design matrix of predictors. y: array-like, shape(n, M) Response variable. coef_: array-like, shape(p, M) The coefficients used in the underlying regression model. It is rowwise sparse, with k nonzero rows. Notes ----- The output, whose type is named ``data``, contains three elements: ``x``, ``y`` and ``coef_``, which correspond the variables, responses and coefficients, respectively. Note that the ``y`` and ``coef_`` here are both matrix: 1. each row of ``x`` and ``y`` indicates a sample; 2. each column of ``coef_`` corresponds to the effect on one response. It is rowwise sparsity. Under this setting, a "useful" variable is relevant to all responses. We :math:`x, y, \beta` for one sample in the math formulas below. * Multitask Regression * Usage: ``family='multigaussian'`` * Model: :math:`y \sim MVN(\mu, \Sigma),\ \mu^T=x^T \beta`. * the variance :math:`\Sigma = \text{diag}(1, 1, \cdots, 1)`; * the coefficient :math:`\beta` contains 30% "strong" values, 40% "moderate" values and the rest are "weak". They come from :math:`N(0, 10)`, :math:`N(0, 5)` and :math:`N(0, 2)`, respectively. 
* Multinomial Regression * Usage: ``family='multinomial'`` * Model: :math:`y` is a "0-1" array with only one "1". Its index is chosed under probabilities :math:`\pi = \exp(x^T \beta)`. * the coefficient :math:`\beta` contains 30% "strong" values, 40% "moderate" values and the rest are "weak". They come from :math:`N(0, 10)`, :math:`N(0, 5)` and :math:`N(0, 2)`, respectively. """ def __init__(self, n=100, p=100, k=10, family="multigaussian", rho=0.5, corr_type="const", coef_=None, M=1, sparse_ratio=None): if corr_type == "exp": # generate correlation matrix with exponential decay R = np.zeros((p, p)) for i in range(p): for j in range(i, p): R[i, j] = rho ** abs(i - j) R = R + R.T - np.identity(p) elif corr_type == "const": # generate correlation matrix with constant correlation R = np.ones((p, p)) * rho for i in range(p): R[i, i] = 1 else: raise ValueError( "corr_type should be \'const\' or \'exp\'") X = np.random.multivariate_normal(mean=np.zeros(p), cov=R, size=(n,)) if sparse_ratio is not None: sparse_size = int((1 - sparse_ratio) * n * p) position = sample(n * p, sparse_size) print(position) for i in range(sparse_size): X[int(position[i] / p), position[i] % p] = 0 Nonzero = sample(p, k) # Nonzero = np.array([0, 1, 2]) # Nonzero[:k] = 1 if coef_ is None: Tbeta = sparse_beta_generator(p, Nonzero, k, M) else: Tbeta = coef_ if family in ("multigaussian", "gaussian"): eta = np.matmul(X, Tbeta) y = eta + np.random.normal(0, 1, n * M).reshape(n, M) elif family in ("multinomial", "binomial"): for i in range(M): Tbeta[:, i] = Tbeta[:, i] - Tbeta[:, M - 1] eta = np.exp(np.matmul(X, Tbeta)) # y2 = np.zeros(n) y = np.zeros([n, M]) index = np.linspace(0, M - 1, M) for i in range(n): p = eta[i, :] / np.sum(eta[i, :]) j = np.random.choice(index, size=1, replace=True, p=p) # print(j) y[i, int(j[0])] = 1 # y2[i] = j elif family == "poisson": X = X / 16 eta = np.matmul(X, Tbeta) eta[eta > 30] = 30 eta[eta < -30] = -30 lam = np.exp(eta) y = np.random.poisson(lam=lam) else: raise 
ValueError( "Family should be \'gaussian\', \'multigaussian\', " "or \'multinomial\'.") self.x = X self.y = y self.coef_ = Tbeta
16,958
34.703158
78
py
abess
abess-master/python/abess/decomposition.py
import numbers import numpy as np from scipy.sparse import coo_matrix, issparse from sklearn.utils.validation import check_array from .pybind_cabess import pywrap_PCA, pywrap_RPCA from .bess_base import bess_base from .utilities import new_data_check class SparsePCA(bess_base): r""" Adaptive Best-Subset Selection(ABESS) algorithm for principal component analysis. Parameters ---------- support_size : array-like, optional default=range(min(n, int(n/(log(log(n))log(p))))). An integer vector representing the alternative support sizes. group : int, optional, default=np.ones(p) The group index for each variable. ic_type : {'aic', 'bic', 'gic', 'ebic', 'loss'}, optional, default='loss' The type of criterion for choosing the support size if `cv=1`. ic_coef : float, optional, default=1.0 Constant that controls the regularization strength on chosen information criterion. cv : int, optional, default=1 The folds number when use the cross-validation method. - If cv=1, cross-validation would not be used. - If cv>1, support size will be chosen by CV's test loss, instead of IC. cv_score : {'test_loss'}, optional, default='test_loss' The score used on test data for CV. Only 'test_loss' is supported for PCA now. thread : int, optional, default=1 Max number of multithreads. - If thread = 0, the maximum number of threads supported by the device will be used. A_init : array-like, optional, default=None Initial active set before the first splicing. always_select : array-like, optional, default=None An array contains the indexes of variables we want to consider in the model. max_iter : int, optional, default=20 Maximum number of iterations taken for the splicing algorithm to converge. Due to the limitation of loss reduction, the splicing algorithm must be able to converge. The number of iterations is only to simplify the implementation. 
is_warm_start : bool, optional, default=True When tuning the optimal parameter combination, whether to use the last solution as a warm start to accelerate the iterative convergence of the splicing algorithm. screening_size : int, optional, default=-1 The number of variables remaining after screening. It should be a non-negative number smaller than p, but larger than any value in support_size. - If screening_size=-1, screening will not be used. - If screening_size=0, screening_size will be set as :math:`\\min(p, int(n / (\\log(\\log(n))\\log(p))))`. splicing_type: {0, 1}, optional, default=1 The type of splicing. "0" for decreasing by half, "1" for decresing by one. Attributes ---------- coef_ : array-like, shape(p_features, ) or (p_features, k) The first :math:`k` principal axes in feature space, which are sorted by decreasing explained variance. References ---------- - Junxian Zhu, Canhong Wen, Jin Zhu, Heping Zhang, and Xueqin Wang. A polynomial algorithm for best-subset selection problem. Proceedings of the National Academy of Sciences, 117(52):33117-33123, 2020. Examples -------- Results may differ with different version of numpy. 
>>> ### Sparsity known >>> >>> from abess.decomposition import SparsePCA >>> import numpy as np >>> np.random.seed(12345) >>> model = SparsePCA(support_size = 10) >>> >>> ### X known >>> X = np.random.randn(100, 50) >>> model.fit(X) SparsePCA(support_size=10) >>> print(np.nonzero(model.coef_)[0]) [10 26 31 33 35 36 38 42 43 49] >>> >>> ### X unknown, but Sigma known >>> model.fit(Sigma = np.cov(X.T)) SparsePCA(support_size=10) >>> print(np.nonzero(model.coef_)[0]) [10 26 31 33 35 36 38 42 43 49] """ def __init__(self, support_size=None, group=None, ic_type="loss", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=1, screening_size=-1, ): super().__init__( algorithm_type="abess", model_type="PCA", normalize_type=1, path_type="seq", max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, # s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type ) def _more_tags(self): return {'requires_y': False} def transform(self, X): r""" For PCA model, apply dimensionality reduction to given data. Parameters ---------- X : array-like, shape (n_samples, p_features) Sample matrix to be transformed. """ X = new_data_check(self, X) return X.dot(self.coef_) def ratio(self, X): r""" Give new data, and it returns the explained ratio. Parameters ---------- X : array-like, shape (n_samples, n_features) Sample matrix. 
""" X = new_data_check(self, X) s = np.cov(X.T) # if len(self.coef_.shape) == 1: # explain = self.coef_.T.dot(s).dot(self.coef_) # else: explain = np.sum(np.diag(self.coef_.T.dot(s).dot(self.coef_))) # if isinstance(s, (int, float)): # full = s # else: full = np.sum(np.diag(s)) return explain / full def fit(self, X=None, y=None, is_normal=False, Sigma=None, number=1, n=None, sparse_matrix=False): r""" The fit function is used to transfer the information of data and return the fit result. Parameters ---------- X : array-like, shape(n_samples, p_features) Training data. y : ignore Ignore. is_normal : bool, optional, default=False whether normalize the variables array before fitting the algorithm. weight : array-like, shape(n_samples,), optional, default=np.ones(n) Individual weights for each sample. Only used for is_weight=True. Sigma : array-like, shape(p_features, p_features), optional default=np.cov(X.T). Sample covariance matrix. For PCA, it can be given as input, instead of X. But if X is given, Sigma will be set to np.cov(X.T). number : int, optional, default=1 Indicates the number of PCs returned. n : int, optional, default=X.shape[0] or 1 Sample size. - if X is given, it would be X.shape[0] by default; - if X is not given (Sigma is given), it would be 1 by default. sparse_matrix : bool, optional, default=False Set as True to treat X as sparse matrix during fitting. It would be automatically set as True when X has the sparse matrix type defined in scipy.sparse. 
""" # Input check if X is not None: if issparse(X): sparse_matrix = True X = check_array(X, accept_sparse=True) n = X.shape[0] p = X.shape[1] X = X - X.mean(axis=0) Sigma = np.cov(X.T) self.n_features_in_ = p X_input = True elif isinstance(Sigma, (list, np.ndarray, np.matrix)): if self.cv > 1: raise ValueError("X should be given to use CV.") Sigma = check_array(Sigma) if (Sigma.shape[0] != Sigma.shape[1] or np.any(Sigma.T != Sigma)): raise ValueError("Sigma should be symmetrical matrix.") if np.any(np.linalg.eigvals(Sigma) < 0): raise ValueError("Sigma should be semi-positive definite.") if n is None: n = 1 p = Sigma.shape[0] X = np.zeros((1, p)) self.n_features_in_ = p is_normal = False X_input = False else: raise ValueError("X or Sigma should be given in PCA.") # # Algorithm_type # if self.algorithm_type == "abess": # algorithm_type_int = 6 # else: # raise ValueError("algorithm_type should not be " + # str(self.algorithm_type)) # for PCA, # model_type_int = 7 path_type_int = 1 # Ic_type: aic, bic, gic, ebic # cv_score: test_loss, roc_auc if self.cv == 1: if self.ic_type == "loss": eval_type_int = 0 elif self.ic_type == "aic": eval_type_int = 1 elif self.ic_type == "bic": eval_type_int = 2 elif self.ic_type == "gic": eval_type_int = 3 elif self.ic_type == "ebic": eval_type_int = 4 elif self.ic_type == "hic": eval_type_int = 5 else: raise ValueError( "ic_type should be \"aic\", \"bic\", \"ebic\"," " \"gic\" or \"hic\".") else: if self.cv_score == "test_loss": eval_type_int = 0 else: raise ValueError( "cv_score should be \"test_loss\".") # cv if (not isinstance(self.cv, int) or self.cv <= 0): raise ValueError("cv should be an positive integer.") if self.cv > n: raise ValueError("cv should be smaller than n.") # Group if self.group is None: g_index = list(range(p)) else: g = np.array(self.group) if g.ndim > 1: raise ValueError("group should be an 1D array of integers.") if g.size != p: raise ValueError( "The length of group should be equal to X.shape[1].") group_set 
= list(set(g)) g.sort() g_index = [] j = 0 for i in group_set: while g[j] != i: j += 1 g_index.append(j) # path parameter (note that: path_type_int = 1) if self.support_size is None: support_sizes = np.ones(((int(p / 3) + 1), number)) else: if isinstance(self.support_size, (numbers.Real, numbers.Integral)): support_sizes = np.zeros((self.support_size, 1)) support_sizes[self.support_size - 1, 0] = 1 elif (len(self.support_size.shape) != 2 or self.support_size.shape[1] != number): raise ValueError( "`support_size` should be 2-dimension and its number of" " columns should be equal to `number`") elif self.support_size.shape[0] > p: raise ValueError( "`support_size` should not larger than p") else: support_sizes = self.support_size support_sizes = np.array(support_sizes).astype('int32') # screening if self.screening_size != -1: if self.screening_size == 0: self.screening_size = min( p, int(n / (np.log(np.log(n)) * np.log(p)))) elif self.screening_size > p: raise ValueError( "screening size should be smaller than X.shape[1].") elif self.screening_size < np.nonzero(support_sizes)[0].max() + 1: raise ValueError( "screening size should be more than max(support_size).") # unused early_stop = False self.n_iter_ = 1 new_s_min = 0 new_s_max = 0 cv_fold_id = np.array([], dtype="int32") # Exchange_num if (not isinstance(self.exchange_num, int) or self.exchange_num <= 0): raise ValueError("exchange_num should be an positive integer.") # Thread if (not isinstance(self.thread, int) or self.thread < 0): raise ValueError( "thread should be positive number or 0" " (maximum supported by your device).") # Splicing type if self.splicing_type not in (0, 1): raise ValueError("splicing type should be 0 or 1.") # number if (not isinstance(number, int) or number <= 0 or number > p): raise ValueError( "number should be an positive integer and" " not bigger than X.shape[1].") # # Important_search # if (not isinstance(self.important_search, int) # or self.important_search < 0): # raise 
ValueError( # "important_search should be a non-negative number.") # A_init if self.A_init is None: A_init_list = np.array([], dtype="int32") else: A_init_list = np.array(self.A_init, dtype="int32") if A_init_list.ndim > 1: raise ValueError("The initial active set should be " "an 1D array of integers.") if (A_init_list.min() < 0 or A_init_list.max() >= p): raise ValueError("A_init contains out-of-range index.") # Sparse X if sparse_matrix: if not isinstance(X, (coo_matrix)): # print("sparse matrix 1") nonzero = 0 tmp = np.zeros([X.shape[0] * X.shape[1], 3]) for j in range(X.shape[1]): for i in range(X.shape[0]): if X[i, j] != 0.: tmp[nonzero, :] = np.array([X[i, j], i, j]) nonzero += 1 X = tmp[:nonzero, :] else: # print("sparse matrix 2") tmp = np.zeros([len(X.data), 3]) tmp[:, 1] = X.row tmp[:, 2] = X.col tmp[:, 0] = X.data ind = np.lexsort((tmp[:, 2], tmp[:, 1])) X = tmp[ind, :] # normalize normalize = 0 if is_normal: normalize = self.normalize_type # always_select if self.always_select is None: always_select_list = np.zeros(0, dtype="int32") else: always_select_list = np.array(self.always_select, dtype="int32") # wrap with cpp if (X_input and n < p) or (p <= number): result = [np.ones((p, number))] else: weight = np.ones(n) result = pywrap_PCA( X, weight, n, p, normalize, Sigma, self.max_iter, self.exchange_num, path_type_int, self.is_warm_start, eval_type_int, self.ic_coef, self.cv, g_index, support_sizes, cv_fold_id, new_s_min, new_s_max, self.screening_size, always_select_list, early_stop, self.thread, sparse_matrix, self.splicing_type, self.important_search, number, A_init_list ) self.coef_ = result[0] return self def fit_transform(self, X=None, y=None, is_normal=False, Sigma=None, number=1, n=None, sparse_matrix=False): r""" Fit and transform the sample matrix. Returns transformed data in expected dimension. Parameters ---------- X : array-like, shape(n_samples, p_features) Training data. y : ignore Ignore. 
is_normal : bool, optional, default=False whether normalize the variables array before fitting the algorithm. weight : array-like, shape(n_samples,), optional, default=np.ones(n) Individual weights for each sample. Only used for is_weight=True. Sigma : array-like, shape(p_features, p_features), optional default=np.cov(X.T). Sample covariance matrix. For PCA, it can be given as input, instead of X. But if X is given, Sigma will be set to np.cov(X.T). number : int, optional, default=1 Indicates the number of PCs returned. n : int, optional, default=X.shape[0] or 1 Sample size. - if X is given, it would be X.shape[0] by default; - if X is not given (Sigma is given), it would be 1 by default. """ self.fit(X, y, is_normal, Sigma, number, n, sparse_matrix) return X @ self.coef_ class RobustPCA(bess_base): r""" Adaptive Best-Subset Selection(ABESS) algorithm for robust principal component analysis. Parameters ---------- support_size : array-like, optional default=range(min(n, int(n/(log(log(n))log(p))))). An integer vector representing the alternative support sizes. ic_type : {'aic', 'bic', 'gic', 'ebic', 'loss'}, optional, default='gic' The type of criterion for choosing the support size. ic_coef : float, optional, default=1.0 Constant that controls the regularization strength on chosen information criterion. thread : int, optional, default=1 Max number of multithreads. - If thread = 0, the maximum number of threads supported by the device will be used. A_init : array-like, optional, default=None Initial active set before the first splicing. always_select : array-like, optional, default=None An array contains the indexes of variables we want to consider in the model. max_iter : int, optional, default=20 Maximum number of iterations taken for the splicing algorithm to converge. Due to the limitation of loss reduction, the splicing algorithm must be able to converge. The number of iterations is only to simplify the implementation. 
is_warm_start : bool, optional, default=True When tuning the optimal parameter combination, whether to use the last solution as a warm start to accelerate the iterative convergence of the splicing algorithm. splicing_type: {0, 1}, optional, default=1 The type of splicing. "0" for decreasing by half, "1" for decresing by one. Attributes ---------- coef_ : array-like, shape(n_samples, p_features) The transformed sample matrix after robust PCA. References ---------- - Junxian Zhu, Canhong Wen, Jin Zhu, Heping Zhang, and Xueqin Wang. A polynomial algorithm for best-subset selection problem. Proceedings of the National Academy of Sciences, 117(52):33117-33123, 2020. Examples -------- Results may differ with different version of numpy. >>> ### Sparsity known >>> >>> from abess.decomposition import RobustPCA >>> import numpy as np >>> np.random.seed(12345) >>> model = RobustPCA(support_size = 10) >>> >>> ### X known >>> X = np.random.randn(100, 50) >>> model.fit(X, r = 10) RobustPCA(support_size=10) >>> print(np.vstack(np.nonzero(model.coef_))) [[ 6 10 24 30 33 35 40 61 73 85] [43 21 23 30 44 32 49 8 48 19]] """ def __init__(self, support_size=None, ic_type="gic", ic_coef=1.0, thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=1 ): super().__init__( algorithm_type="abess", model_type="RPCA", normalize_type=1, path_type="seq", max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, s_min=None, s_max=None, cv=1, ic_type=ic_type, ic_coef=ic_coef, always_select=always_select, thread=thread, A_init=A_init, # group=group, splicing_type=splicing_type ) def _more_tags(self): # Note: We ignore estimator's check here because `RobustPCA()` # is not an standard "estimator". # There is no "coefficient", not even "model" to test. # (It just returns the transformation of `X`.) 
return {'_skip_test': True} def fit(self, X, y=None, r=None, sparse_matrix=False): r""" The fit function is used to transfer the information of data and return the fit result. Parameters ---------- X : array-like, shape(n_samples, p_features) Training data. y : ignore Ignore. r : int Rank of the (recovered) information matrix L. It should be smaller than rank of X (at least smaller than X.shape[1]). sparse_matrix : bool, optional, default=False Set as True to treat X as sparse matrix during fitting. It would be automatically set as True when X has the sparse matrix type defined in scipy.sparse. """ # Input check if X is not None: if issparse(X): sparse_matrix = True X = check_array(X, accept_sparse=True) n = X.shape[0] p = X.shape[1] self.n_features_in_ = p if r is None: r = min(n, p) - 1 else: raise ValueError("X should be an array.") # # Algorithm_type # if self.algorithm_type == "abess": # algorithm_type_int = 6 # else: # raise ValueError("algorithm_type should not be " + # str(self.algorithm_type)) # for RPCA, # model_type_int = 10 path_type_int = 1 # Ic_type if self.ic_type == "aic": eval_type_int = 1 elif self.ic_type == "bic": eval_type_int = 2 elif self.ic_type == "gic": eval_type_int = 3 elif self.ic_type == "ebic": eval_type_int = 4 elif self.ic_type == "hic": eval_type_int = 5 else: raise ValueError( "ic_type should be \"aic\", \"bic\", \"ebic\", \"gic\", " "or \"hic\".") # # Group # if group is None: # g_index = list(range(n * p)) # else: # group = np.array(group) # if group.ndim > 1: # raise ValueError("group should be an 1D array of integers.") # if group.size != n * p: # raise ValueError( # "The length of group should be equal to" # " (X.shape[0] * X.shape[1]).") # g_index = [] # group.sort() # group_set = list(set(group)) # j = 0 # for i in group_set: # while group[j] != i: # j += 1 # g_index.append(j) # path parameter (note that: path_type_int = 1) if self.support_size is None: support_sizes = list(range(0, n * p)) else: if 
isinstance(self.support_size, (numbers.Real, numbers.Integral)): support_sizes = np.empty(1, dtype=int) support_sizes[0] = self.support_size elif (np.any(np.array(self.support_size) > n * p) or np.any(np.array(self.support_size) < 0)): raise ValueError( "All support_size should be between 0 and X.shape[1]") else: support_sizes = self.support_size support_sizes = np.array(support_sizes).astype('int32') # alphas if r == int(r): alphas = np.array([r], dtype=float) else: raise ValueError("r should be integer") # unused g_index = list(range(n * p)) new_s_min = 0 new_s_max = 0 new_lambda_min = 0 new_lambda_max = 0 # Exchange_num if (not isinstance(self.exchange_num, int) or self.exchange_num <= 0): raise ValueError("exchange_num should be an positive integer.") # Thread if (not isinstance(self.thread, int) or self.thread < 0): raise ValueError( "thread should be positive number or 0" " (maximum supported by your device).") # Splicing type if self.splicing_type not in (0, 1): raise ValueError("splicing type should be 0 or 1.") # # Important_search # if (not isinstance(self.important_search, int) # or self.important_search < 0): # raise ValueError( # "important_search should be a non-negative number.") # A_init if self.A_init is None: A_init_list = np.array([], dtype="int32") else: A_init_list = np.array(self.A_init, dtype="int32") if A_init_list.ndim > 1: raise ValueError("The initial active set should be " "an 1D array of integers.") if (A_init_list.min() < 0 or A_init_list.max() >= n * p): raise ValueError("A_init contains out-of-range index.") # Sparse X if sparse_matrix: if not isinstance(X, (coo_matrix)): # print("sparse matrix 1") nonzero = 0 tmp = np.zeros([X.shape[0] * X.shape[1], 3]) for j in range(X.shape[1]): for i in range(X.shape[0]): if X[i, j] != 0.: tmp[nonzero, :] = np.array([X[i, j], i, j]) nonzero += 1 X = tmp[:nonzero, :] else: # print("sparse matrix 2") tmp = np.zeros([len(X.data), 3]) tmp[:, 1] = X.row tmp[:, 2] = X.col tmp[:, 0] = X.data ind = 
np.lexsort((tmp[:, 2], tmp[:, 1])) X = tmp[ind, :] # normalize normalize = 0 # always_select if self.always_select is None: always_select_list = np.zeros(0, dtype="int32") else: always_select_list = np.array(self.always_select, dtype="int32") # unused n_lambda = 100 early_stop = False # wrap with cpp if r < 1: result = [X] else: result = pywrap_RPCA( X, n, p, normalize, self.max_iter, self.exchange_num, path_type_int, self.is_warm_start, eval_type_int, self.ic_coef, g_index, support_sizes, alphas, new_s_min, new_s_max, new_lambda_min, new_lambda_max, n_lambda, self.screening_size, always_select_list, self.primary_model_fit_max_iter, self.primary_model_fit_epsilon, early_stop, self.thread, sparse_matrix, self.splicing_type, self.important_search, A_init_list ) self.coef_ = result[0].reshape(p, n).T return self
27,964
34.715198
79
py
abess
abess-master/python/abess/functions.py
# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import warnings import numbers import operator import numpy import numpy as np from sklearn.utils import ( check_consistent_length, column_or_1d, check_array, check_consistent_length) from sklearn.metrics._regression import ( _num_samples, _check_reg_targets) from sklearn.exceptions import UndefinedMetricWarning from scipy.special import xlogy class StepFunction: """Callable step function. .. math:: f(z) = a * y_i + b, x_i \\leq z < x_{i + 1} Parameters ---------- x : ndarray, shape = (n_points,) Values on the x axis in ascending order. y : ndarray, shape = (n_points,) Corresponding values on the y axis. a : float, optional, default: 1.0 Constant to multiply by. b : float, optional, default: 0.0 Constant offset term. """ def __init__(self, x, y, a=1., b=0.): check_consistent_length(x, y) self.x = x self.y = y self.a = a self.b = b def __call__(self, x): """Evaluate step function. Parameters ---------- x : float|array-like, shape=(n_values,) Values to evaluate step function at. Returns ------- y : float|array-like, shape=(n_values,) Values of step function at `x`. 
""" x = np.atleast_1d(x) if not np.isfinite(x).all(): raise ValueError("x must be finite") if np.min(x) < self.x[0] or np.max(x) > self.x[-1]: raise ValueError( "x must be within [%f; %f]" % (self.x[0], self.x[-1])) i = np.searchsorted(self.x, x, side='left') not_exact = self.x[i] != x i[not_exact] -= 1 value = self.a * self.y[i] + self.b if value.shape[0] == 1: return value[0] return value # def __repr__(self): # return "StepFunction(x=%r, y=%r, a=%r, b=%r)" % ( # self.x, self.y, self.a, self.b) class BreslowEstimator: r"""Breslow's estimator of the cumulative hazard function. Attributes ---------- cum_baseline_hazard_ : :class:`sksurv.functions.StepFunction` Cumulative baseline hazard function. baseline_survival_ : :class:`sksurv.functions.StepFunction` Baseline survival function. """ def fit(self, linear_predictor, event, time): r"""Compute baseline cumulative hazard function. Parameters ---------- linear_predictor : array-like, shape = (n_samples,) Linear predictor of risk: `X @ coef`. event : array-like, shape = (n_samples,) Contains binary event indicators. time : array-like, shape = (n_samples,) Contains event/censoring times. Returns ------- self """ risk_score = np.exp(linear_predictor) order = np.argsort(time, kind="mergesort") risk_score = risk_score[order] uniq_times, n_events, n_at_risk, _ = self._compute_counts( event, time, order) divisor = np.empty(n_at_risk.shape, dtype=float) value = np.sum(risk_score) divisor[0] = value k = 0 for i in range(1, len(n_at_risk)): d = n_at_risk[i - 1] - n_at_risk[i] value -= risk_score[k:(k + d)].sum() k += d divisor[i] = value assert k == n_at_risk[0] - n_at_risk[-1] y = np.cumsum(n_events / divisor) self.cum_baseline_hazard_ = StepFunction(uniq_times, y) self.baseline_survival_ = StepFunction( self.cum_baseline_hazard_.x, np.exp(- self.cum_baseline_hazard_.y)) return self # def get_cumulative_hazard_function(self, linear_predictor): # r"""Predict cumulative hazard function. 
# Parameters # ---------- # linear_predictor : array-like, shape = (n_samples,) # Linear predictor of risk: `X @ coef`. # Returns # ------- # cum_hazard : ndarray, shape = (n_samples,) # Predicted cumulative hazard functions. # """ # risk_score = np.exp(linear_predictor) # n_samples = risk_score.shape[0] # funcs = np.empty(n_samples, dtype=object) # for i in range(n_samples): # funcs[i] = StepFunction(x=self.cum_baseline_hazard_.x, # y=self.cum_baseline_hazard_.y, # a=risk_score[i]) # return funcs def get_survival_function(self, linear_predictor): r"""Predict survival function. Parameters ---------- linear_predictor : array-like, shape = (n_samples,) Linear predictor of risk: `X @ coef`. Returns ------- survival : ndarray, shape = (n_samples,) Predicted survival functions. """ risk_score = np.exp(linear_predictor) n_samples = risk_score.shape[0] funcs = np.empty(n_samples, dtype=object) for i in range(n_samples): funcs[i] = StepFunction( x=self.baseline_survival_.x, y=np.power(self.baseline_survival_.y, risk_score[i])) return funcs @staticmethod def _compute_counts(event, time, order=None): """ Count right censored and uncensored samples at each unique time point. Parameters ---------- event : array Boolean event indicator. time : array Survival time or time of censoring. order : array or None Indices to order time in ascending order. If None, order will be computed. Returns ------- times : array Unique time points. n_events : array Number of events at each time point. n_at_risk : array Number of samples that have not been censored or have not had an event at each time point. n_censored : array Number of censored samples at each time point. 
""" n_samples = event.shape[0] if order is None: order = np.argsort(time, kind="mergesort") uniq_times = np.empty(n_samples, dtype=time.dtype) uniq_events = np.empty(n_samples, dtype=int) uniq_counts = np.empty(n_samples, dtype=int) i = 0 prev_val = time[order[0]] j = 0 while True: count_event = 0 count = 0 while i < n_samples and prev_val == time[order[i]]: if event[order[i]]: count_event += 1 count += 1 i += 1 uniq_times[j] = prev_val uniq_events[j] = count_event uniq_counts[j] = count j += 1 if i == n_samples: break prev_val = time[order[i]] times = np.resize(uniq_times, j) n_events = np.resize(uniq_events, j) total_count = np.resize(uniq_counts, j) n_censored = total_count - n_events # offset cumulative sum by one total_count = np.r_[0, total_count] n_at_risk = n_samples - np.cumsum(total_count) return times, n_events, n_at_risk[:-1], n_censored def check_scalar( x, name, target_type, *, min_val=None, max_val=None, include_boundaries="both", ): """Validate scalar parameters type and value. Parameters ---------- x : object The scalar parameter to validate. name : str The name of the parameter to be printed in error messages. target_type : type or tuple Acceptable data types for the parameter. min_val : float or int, default=None The minimum valid value the parameter can take. If None (default) it is implied that the parameter does not have a lower bound. max_val : float or int, default=None The maximum valid value the parameter can take. If None (default) it is implied that the parameter does not have an upper bound. include_boundaries : {"left", "right", "both", "neither"}, default="both" Whether the interval defined by `min_val` and `max_val` should include the boundaries. Possible choices are: - `"left"`: only `min_val` is included in the valid interval. It is equivalent to the interval `[ min_val, max_val )`. - `"right"`: only `max_val` is included in the valid interval. It is equivalent to the interval `( min_val, max_val ]`. 
- `"both"`: `min_val` and `max_val` are included in the valid interval. It is equivalent to the interval `[ min_val, max_val ]`. - `"neither"`: neither `min_val` nor `max_val` are included in the valid interval. It is equivalent to the interval `( min_val, max_val )`. Returns ------- x : numbers.Number The validated number. Raises ------ TypeError If the parameter's type does not match the desired type. ValueError If the parameter's value violates the given bounds. If `min_val`, `max_val` and `include_boundaries` are inconsistent. """ def type_name(t): """Convert type into humman readable string.""" module = t.__module__ qualname = t.__qualname__ if module == "builtins": return qualname elif t == numbers.Real: return "float" elif t == numbers.Integral: return "int" return f"{module}.{qualname}" if not isinstance(x, target_type): if isinstance(target_type, tuple): types_str = ", ".join(type_name(t) for t in target_type) target_type_str = f"{{{types_str}}}" else: target_type_str = type_name(target_type) raise TypeError( f"{name} must be an instance of {target_type_str}, not" f" {type(x).__qualname__}." ) expected_include_boundaries = ("left", "right", "both", "neither") if include_boundaries not in expected_include_boundaries: raise ValueError( f"Unknown value for `include_boundaries`: {repr(include_boundaries)}. " f"Possible values are: {expected_include_boundaries}." ) if max_val is None and include_boundaries == "right": raise ValueError( "`include_boundaries`='right' without specifying explicitly `max_val` " "is inconsistent." ) if min_val is None and include_boundaries == "left": raise ValueError( "`include_boundaries`='left' without" " specifying explicitly `min_val` " "is inconsistent." 
) comparison_operator = ( operator.lt if include_boundaries in ("left", "both") else operator.le ) if min_val is not None and comparison_operator(x, min_val): raise ValueError( f"{name} == {x}, must be" f" {'>=' if include_boundaries in ('left', 'both') else '>'} " f"{min_val}." ) comparison_operator = ( operator.gt if include_boundaries in ("right", "both") else operator.ge ) if max_val is not None and comparison_operator(x, max_val): raise ValueError( f"{name} == {x}, must be" f" {'<=' if include_boundaries in ('right', 'both') else '<'} " f"{max_val}." ) return x def _mean_tweedie_deviance(y_true, y_pred, sample_weight, power): """Mean Tweedie deviance regression loss.""" p = power if p < 0: # 'Extreme stable', y any real number, y_pred > 0 dev = 2 * ( np.power(np.maximum(y_true, 0), 2 - p) / ((1 - p) * (2 - p)) - y_true * np.power(y_pred, 1 - p) / (1 - p) + np.power(y_pred, 2 - p) / (2 - p) ) elif p == 0: # Normal distribution, y and y_pred any real number dev = (y_true - y_pred) ** 2 elif p == 1: # Poisson distribution dev = 2 * (xlogy(y_true, y_true / y_pred) - y_true + y_pred) elif p == 2: # Gamma distribution dev = 2 * (np.log(y_pred / y_true) + y_true / y_pred - 1) else: dev = 2 * ( np.power(y_true, 2 - p) / ((1 - p) * (2 - p)) - y_true * np.power(y_pred, 1 - p) / (1 - p) + np.power(y_pred, 2 - p) / (2 - p) ) return np.average(dev, weights=sample_weight) def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=None, power=0): """Mean Tweedie deviance regression loss. Read more in the :ref:`User Guide <mean_tweedie_deviance>`. Parameters ---------- y_true : array-like of shape (n_samples,) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. power : float, default=0 Tweedie power parameter. Either power <= 0 or power >= 1. The higher `p` the less weight is given to extreme deviations between true and predicted targets. 
- power < 0: Extreme stable distribution. Requires: y_pred > 0. - power = 0 : Normal distribution, output corresponds to mean_squared_error. y_true and y_pred can be any real numbers. - power = 1 : Poisson distribution. Requires: y_true >= 0 and y_pred > 0. - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0 and y_pred > 0. - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0. - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0 and y_pred > 0. - otherwise : Positive stable distribution. Requires: y_true > 0 and y_pred > 0. Returns ------- loss : float A non-negative floating point value (the best value is 0.0). Examples -------- >>> from sklearn.metrics import mean_tweedie_deviance >>> y_true = [2, 0, 1, 4] >>> y_pred = [0.5, 0.5, 2., 2.] >>> mean_tweedie_deviance(y_true, y_pred, power=1) 1.4260... """ y_type, y_true, y_pred, _ = _check_reg_targets( y_true, y_pred, None, dtype=[np.float64, np.float32] ) if y_type == "continuous-multioutput": raise ValueError("Multioutput not supported in mean_tweedie_deviance") check_consistent_length(y_true, y_pred, sample_weight) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) sample_weight = sample_weight[:, np.newaxis] p = check_scalar( power, name="power", target_type=numbers.Real, ) message = f"Mean Tweedie deviance error with power={p} can only be used on " if p < 0: # 'Extreme stable', y any real number, y_pred > 0 if (y_pred <= 0).any(): raise ValueError(message + "strictly positive y_pred.") elif p == 0: # Normal, y and y_pred can be any real number pass elif 0 < p < 1: raise ValueError( "Tweedie deviance is only defined for power<=0 and power>=1.") elif 1 <= p < 2: # Poisson and compound Poisson distribution, y >= 0, y_pred > 0 if (y_true < 0).any() or (y_pred <= 0).any(): raise ValueError( message + "non-negative y and strictly positive y_pred.") elif p >= 2: # Gamma and Extreme stable distribution, y and y_pred > 0 if (y_true <= 0).any() or (y_pred 
<= 0).any(): raise ValueError(message + "strictly positive y and y_pred.") else: # pragma: nocover # Unreachable statement raise ValueError return _mean_tweedie_deviance( y_true, y_pred, sample_weight=sample_weight, power=power ) def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0): """D^2 regression score function, fraction of Tweedie deviance explained. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A model that always uses the empirical mean of `y_true` as constant prediction, disregarding the input features, gets a D^2 score of 0.0. Read more in the :ref:`User Guide <d2_score>`. .. versionadded:: 1.0 Parameters ---------- y_true : array-like of shape (n_samples,) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) Estimated target values. sample_weight : array-like of shape (n_samples,), optional Sample weights. power : float, default=0 Tweedie power parameter. Either power <= 0 or power >= 1. The higher `p` the less weight is given to extreme deviations between true and predicted targets. - power < 0: Extreme stable distribution. Requires: y_pred > 0. - power = 0 : Normal distribution, output corresponds to r2_score. y_true and y_pred can be any real numbers. - power = 1 : Poisson distribution. Requires: y_true >= 0 and y_pred > 0. - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0 and y_pred > 0. - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0. - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0 and y_pred > 0. - otherwise : Positive stable distribution. Requires: y_true > 0 and y_pred > 0. Returns ------- z : float or ndarray of floats The D^2 score. Notes ----- This is not a symmetric function. Like R^2, D^2 score may be negative (it need not actually be the square of a quantity D). This metric is not well-defined for single samples and will return a NaN value if n_samples is less than two. References ---------- .. 
[1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J. Wainwright. "Statistical Learning with Sparsity: The Lasso and Generalizations." (2015). https://trevorhastie.github.io Examples -------- >>> from sklearn.metrics import d2_tweedie_score >>> y_true = [0.5, 1, 2.5, 7] >>> y_pred = [1, 1, 5, 3.5] >>> d2_tweedie_score(y_true, y_pred) 0.285... >>> d2_tweedie_score(y_true, y_pred, power=1) 0.487... >>> d2_tweedie_score(y_true, y_pred, power=2) 0.630... >>> d2_tweedie_score(y_true, y_true, power=2) 1.0 """ y_type, y_true, y_pred, _ = _check_reg_targets( y_true, y_pred, None, dtype=[np.float64, np.float32] ) if y_type == "continuous-multioutput": raise ValueError("Multioutput not supported in d2_tweedie_score") if _num_samples(y_pred) < 2: msg = "D^2 score is not well-defined with less than two samples." warnings.warn(msg, UndefinedMetricWarning) return float("nan") y_true, y_pred = np.squeeze(y_true), np.squeeze(y_pred) numerator = mean_tweedie_deviance( y_true, y_pred, sample_weight=sample_weight, power=power ) y_avg = np.average(y_true, weights=sample_weight) denominator = _mean_tweedie_deviance( y_true, y_avg, sample_weight=sample_weight, power=power ) return 1 - numerator / denominator def _check_estimate_1d(estimate, test_time): estimate = check_array(estimate, ensure_2d=False) if estimate.ndim != 1: raise ValueError( 'Expected 1D array, got {}D array instead:\narray={}.\n'.format( estimate.ndim, estimate)) check_consistent_length(test_time, estimate) return estimate def _check_inputs(event_indicator, event_time, estimate): check_consistent_length(event_indicator, event_time, estimate) event_indicator = check_array(event_indicator, ensure_2d=False) event_time = check_array(event_time, ensure_2d=False) estimate = _check_estimate_1d(estimate, event_time) if not numpy.issubdtype(event_indicator.dtype, numpy.bool_): raise ValueError( 'only boolean arrays are supported as class labels ' 'for survival analysis, got {}'.format(event_indicator.dtype)) if 
len(event_time) < 2: raise ValueError("Need a minimum of two samples") if not event_indicator.any(): raise ValueError("All samples are censored") return event_indicator, event_time, estimate def _get_comparable(event_indicator, event_time, order): n_samples = len(event_time) tied_time = 0 comparable = {} i = 0 while i < n_samples - 1: time_i = event_time[order[i]] start = i + 1 end = start while end < n_samples and event_time[order[end]] == time_i: end += 1 # check for tied event times event_at_same_time = event_indicator[order[i:end]] censored_at_same_time = ~event_at_same_time for j in range(i, end): if event_indicator[order[j]]: mask = numpy.zeros(n_samples, dtype=bool) mask[end:] = True # an event is comparable to censored samples at same time point mask[i:end] = censored_at_same_time comparable[j] = mask tied_time += censored_at_same_time.sum() i = end return comparable, tied_time def _estimate_concordance_index( event_indicator, event_time, estimate, weights, tied_tol=1e-8): order = numpy.argsort(event_time) comparable, tied_time = _get_comparable(event_indicator, event_time, order) # if len(comparable) == 0: # raise NoComparablePairException( # "Data has no comparable pairs, " # "cannot estimate concordance index.") concordant = 0 discordant = 0 tied_risk = 0 numerator = 0.0 denominator = 0.0 for ind, mask in comparable.items(): est_i = estimate[order[ind]] event_i = event_indicator[order[ind]] w_i = weights[order[ind]] est = estimate[order[mask]] msg = 'got censored sample at index {}'.format( order[ind]) + ', but expected uncensored' assert event_i, msg ties = numpy.absolute(est - est_i) <= tied_tol n_ties = ties.sum() # an event should have a higher score con = est < est_i n_con = con[~ties].sum() numerator += w_i * n_con + 0.5 * w_i * n_ties denominator += w_i * mask.sum() tied_risk += n_ties concordant += n_con discordant += est.size - n_con - n_ties cindex = numerator / denominator return cindex, concordant, discordant, tied_risk, tied_time def 
concordance_index_censored(event_indicator, event_time, estimate, sample_weight=None, tied_tol=1e-8): """Concordance index for right-censored data Reference from scikit-survival: `sksurv.metrics.concordance_index_censored`. The concordance index is defined as the proportion of all comparable pairs in which the predictions and outcomes are concordant. Two samples are comparable if (i) both of them experienced an event (at different times), or (ii) the one with a shorter observed survival time experienced an event, in which case the event-free subject "outlived" the other. A pair is not comparable if they experienced events at the same time. Concordance intuitively means that two samples were ordered correctly by the model. More specifically, two samples are concordant, if the one with a higher estimated risk score has a shorter actual survival time. When predicted risks are identical for a pair, 0.5 rather than 1 is added to the count of concordant pairs. See the :ref:`User Guide </user_guide/evaluating-survival-models.ipynb>` and [1]_ for further description. Parameters ---------- event_indicator : array-like, shape = (n_samples,) Boolean array denotes whether an event occurred event_time : array-like, shape = (n_samples,) Array containing the time of an event or time of censoring estimate : array-like, shape = (n_samples,) Estimated risk of experiencing an event tied_tol : float, optional, default: 1e-8 The tolerance value for considering ties. If the absolute difference between risk scores is smaller or equal than `tied_tol`, risk scores are considered tied. Returns ------- cindex : float Concordance index concordant : int Number of concordant pairs discordant : int Number of discordant pairs tied_risk : int Number of pairs having tied estimated risks tied_time : int Number of comparable pairs sharing the same time See also -------- concordance_index_ipcw Alternative estimator of the concordance index with less bias. References ---------- .. 
[1] Harrell, F.E., Califf, R.M., Pryor, D.B., Lee, K.L., Rosati, R.A, "Multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing errors", Statistics in Medicine, 15(4), 361-87, 1996. """ event_indicator, event_time, estimate = _check_inputs( event_indicator, event_time, estimate) if sample_weight is None: sample_weight = numpy.ones_like(estimate) return _estimate_concordance_index( event_indicator, event_time, estimate, sample_weight, tied_tol)
26,182
33.67947
83
py
abess
abess-master/python/abess/linear.py
import warnings import numpy as np from sklearn.metrics import r2_score, accuracy_score, ndcg_score from .bess_base import bess_base from .utilities import fix_docs, new_data_check from .functions import (BreslowEstimator, concordance_index_censored) # from .nonparametric import _compute_counts try: from sklearn.metrics import d2_tweedie_score except ImportError: from .functions import d2_tweedie_score @ fix_docs class LogisticRegression(bess_base): r""" Adaptive Best-Subset Selection (ABESS) algorithm for logistic regression. Parameters ---------- splicing_type: {0, 1}, optional, default=0 The type of splicing: "0" for decreasing by half, "1" for decresing by one. important_search : int, optional, default=128 The size of inactive set during updating active set when splicing. It should be a non-positive integer and if important_search=0, it would be set as the size of whole inactive set. Examples -------- Results may differ with different version of numpy. >>> ### Sparsity known >>> >>> from abess.linear import LogisticRegression >>> from abess.datasets import make_glm_data >>> import numpy as np >>> np.random.seed(12345) >>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'binomial') >>> model = LogisticRegression(support_size = 10) >>> model.fit(data.x, data.y) LogisticRegression(support_size=10) >>> model.predict(data.x)[:10] array([0, 1, 0, 1, 1, 1, 0, 0, 0, 1]) >>> ### Sparsity unknown >>> >>> # path_type="seq" >>> model = LogisticRegression(path_type = "seq") >>> model.fit(data.x, data.y) LogisticRegression() >>> model.predict(data.x)[:10] array([0, 1, 0, 1, 1, 1, 0, 0, 0, 1]) >>> >>> # path_type="gs" >>> model = LogisticRegression(path_type="gs") >>> model.fit(data.x, data.y) LogisticRegression(path_type='gs') >>> model.predict(data.x)[:10] array([0, 1, 0, 1, 1, 1, 0, 0, 0, 1]) """ def __init__(self, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, fit_intercept=True, ic_type="ebic", ic_coef=1.0, cv=1, 
cv_score="roc_auc", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, important_search=128, screening_size=-1, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, approximate_Newton=False ): super().__init__( algorithm_type="abess", model_type="Logistic", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, fit_intercept=fit_intercept, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, approximate_Newton=approximate_Newton, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search, _estimator_type='classifier' ) def _more_tags(self): return {'binary_only': True, 'no_validation': True} def predict_proba(self, X): r""" Give the probabilities of new sample being assigned to different classes. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- proba : array-like, shape(n_samples, 2) Returns the probabilities for class "0" and "1" on given X. """ X = new_data_check(self, X) intercept_ = np.ones(X.shape[0]) * self.intercept_ xbeta = X.dot(self.coef_) + intercept_ proba = np.exp(xbeta) / (1 + np.exp(xbeta)) return np.vstack((np.ones(X.shape[0]) - proba, proba)).T def predict(self, X): r""" This function predicts class label for given data. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- y : array-like, shape(n_samples,) Predict class labels (0 or 1) for samples in X. 
""" X = new_data_check(self, X) intercept_ = np.ones(X.shape[0]) * self.intercept_ xbeta = X.dot(self.coef_) + intercept_ y = np.repeat(self.classes_[0], xbeta.size) if self.classes_.size == 2: y[xbeta > 0] = self.classes_[1] return y def score(self, X, y, sample_weight=None): r""" Give new data, and it returns the prediction accuracy. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix. y : array-like, shape(n_samples,) Real class labels (0 or 1) for X. sample_weight: array-like, shape(n_samples,), default=None Sample weights. Returns ------- score : float The mean prediction accuracy on the given data. """ if sample_weight is None: sample_weight = np.ones(len(y)) X, y, sample_weight = new_data_check(self, X, y, sample_weight) # intercept_ = np.ones(X.shape[0]) * self.intercept_ # xbeta = X.dot(self.coef_) + intercept_ # xbeta[xbeta > 30] = 30 # xbeta[xbeta < -30] = -30 # pr = np.exp(xbeta) / (1 + np.exp(xbeta)) # return (y * np.log(pr) + # (np.ones(X.shape[0]) - y) * # np.log(np.ones(X.shape[0]) - pr)).sum() y_pred = self.predict(X) return accuracy_score(y, y_pred, sample_weight=sample_weight) @ fix_docs class LinearRegression(bess_base): r""" Adaptive Best-Subset Selection(ABESS) algorithm for linear regression. Parameters ---------- splicing_type: {0, 1}, optional, default=0 The type of splicing: "0" for decreasing by half, "1" for decresing by one. important_search : int, optional, default=128 The size of inactive set during updating active set when splicing. It should be a non-positive integer and if important_search=0, it would be set as the size of whole inactive set. Examples -------- Results may differ with different version of numpy. 
>>> ### Sparsity known >>> >>> from abess.linear import LinearRegression >>> from abess.datasets import make_glm_data >>> import numpy as np >>> np.random.seed(12345) >>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'gaussian') >>> model = LinearRegression(support_size = 10) >>> model.fit(data.x, data.y) LinearRegression(support_size=10) >>> model.predict(data.x)[:4] array([ -91.02169383, 100.7302593 , -226.99517096, 9.47389912]) >>> ### Sparsity unknown >>> >>> # path_type="seq" >>> model = LinearRegression(path_type = "seq") >>> model.fit(data.x, data.y) LinearRegression() >>> model.predict(data.x)[:4] array([ -91.02169383, 100.7302593 , -226.99517096, 9.47389912]) >>> >>> # path_type="gs" >>> model = LinearRegression(path_type="gs") >>> model.fit(data.x, data.y) LinearRegression(path_type='gs') >>> model.predict(data.x)[:4] array([ -91.02169383, 100.7302593 , -226.99517096, 9.47389912]) """ def __init__(self, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, fit_intercept=True, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, important_search=128, screening_size=-1, covariance_update=False, # primary_model_fit_max_iter=10, # primary_model_fit_epsilon=1e-8, # approximate_Newton=False ): super().__init__( algorithm_type="abess", model_type="Lm", normalize_type=1, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, fit_intercept=fit_intercept, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, thread=thread, covariance_update=covariance_update, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search, _estimator_type='regressor' ) def _more_tags(self): return {'multioutput': False} def 
predict(self, X): r""" Predict on given data. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- y : array-like, shape(n_samples,) Prediction of the mean on given X. """ X = new_data_check(self, X) intercept_ = np.ones(X.shape[0]) * self.intercept_ return X.dot(self.coef_) + intercept_ def score(self, X, y, sample_weight=None): r""" Give data, and it returns the coefficient of determination. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix. y : array-like, shape(n_samples, p_features) Real response for given X. sample_weight: array-like, shape(n_samples,), default=None Sample weights. Returns ------- score : float :math:`R^2` score. """ if sample_weight is None: sample_weight = np.ones(len(y)) X, y, sample_weight = new_data_check(self, X, y, sample_weight) y_pred = self.predict(X) return r2_score(y, y_pred, sample_weight=sample_weight) @ fix_docs class CoxPHSurvivalAnalysis(bess_base, BreslowEstimator): r""" Adaptive Best-Subset Selection (ABESS) algorithm for Cox proportional hazards model. Parameters ---------- splicing_type: {0, 1}, optional, default=0 The type of splicing: "0" for decreasing by half, "1" for decresing by one. important_search : int, optional, default=128 The size of inactive set during updating active set when splicing. It should be a non-positive integer and if important_search=0, it would be set as the size of whole inactive set. Examples -------- Results may differ with different version of numpy. 
>>> ### Sparsity known >>> >>> from abess.linear import CoxPHSurvivalAnalysis >>> from abess.datasets import make_glm_data >>> import numpy as np >>> np.random.seed(12345) >>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'cox') censoring rate:0.6 >>> model = CoxPHSurvivalAnalysis(support_size = 10) >>> model.fit(data.x, data.y) CoxPHSurvivalAnalysis(support_size=10) >>> model.predict(data.x)[:4] array([1.14440127e-01, 2.03621826e+04, 3.06214682e-08, 5.01932889e+02]) >>> ### Sparsity unknown >>> >>> # path_type="seq" >>> model = CoxPHSurvivalAnalysis(path_type = "seq") >>> model.fit(data.x, data.y) CoxPHSurvivalAnalysis() >>> model.predict(data.x)[:4] array([1.36126061e-01, 1.38312962e+04, 5.95470917e-08, 3.87857074e+02]) >>> >>> # path_type="gs" >>> model = CoxPHSurvivalAnalysis(path_type="gs") >>> model.fit(data.x, data.y) CoxPHSurvivalAnalysis(path_type='gs') >>> model.predict(data.x)[:4] array([1.48661058e-01, 1.19376056e+04, 5.80413711e-08, 4.73270508e+02]) """ def __init__(self, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, important_search=128, screening_size=-1, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, approximate_Newton=False ): super().__init__( algorithm_type="abess", model_type="Cox", normalize_type=3, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, approximate_Newton=approximate_Newton, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, 
important_search=important_search, baseline_model=BreslowEstimator() ) def _more_tags(self): # Note: We ignore estimator's check here because it would pass # an 1-column `y` for testing, but for `CoxPHSurvivalAnalysis()`, # 2-column `y` should be given (one for time, another for censoring). return {'_skip_test': True} def predict(self, X): r""" Returns the time-independent part of hazard function, i.e. :math:`\exp(X\beta)` on given data. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- y : array-like, shape(n_samples,) Return :math:`\exp(X\beta)`. """ X = new_data_check(self, X) return np.exp(X.dot(self.coef_)) def score(self, X, y, sample_weight=None): r""" Give data, and it returns C-index. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix. y : array-like, shape(n_samples, p_features) Real response for given X. sample_weight: array-like, shape(n_samples,), default=None Sample weights. Returns ------- score : float C-index. """ if sample_weight is None: sample_weight = np.ones(len(y)) X, y, sample_weight = new_data_check(self, X, y, sample_weight) risk_score = X.dot(self.coef_) y = np.array(y) result = concordance_index_censored( np.array(y[:, 1], np.bool_), y[:, 0], risk_score, sample_weight=sample_weight) return result[0] def predict_survival_function(self, X): r""" Predict survival function. The survival function for an individual with feature vector :math:`x` is defined as .. math:: S(t \mid x) = S_0(t)^{\exp(x^\top \beta)} , where :math:`S_0(t)` is the baseline survival function, estimated by Breslow's estimator. Parameters ---------- X : array-like, shape = (n_samples, n_features) Data matrix. Returns ------- survival : ndarray of :class:`StepFunction`, shape = (n_samples,) Predicted survival functions. 
""" return self.baseline_model.get_survival_function( np.log(self.predict(X))) @ fix_docs class PoissonRegression(bess_base): r""" Adaptive Best-Subset Selection(ABESS) algorithm for Poisson regression. Parameters ---------- splicing_type: {0, 1}, optional, default=0 The type of splicing: "0" for decreasing by half, "1" for decresing by one. important_search : int, optional, default=128 The size of inactive set during updating active set when splicing. It should be a non-positive integer and if important_search=0, it would be set as the size of whole inactive set. Examples -------- Results may differ with different version of numpy. >>> ### Sparsity known >>> >>> from abess.linear import PoissonRegression >>> from abess.datasets import make_glm_data >>> import numpy as np >>> np.random.seed(12345) >>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'poisson') >>> model = PoissonRegression(support_size = 10) >>> model.fit(data.x, data.y) PoissonRegression(support_size=10) >>> model.predict(data.x)[:4] array([0.51647246, 1.72152904, 0.25906485, 1.11715123]) >>> ### Sparsity unknown >>> >>> # path_type="seq" >>> model = PoissonRegression(path_type = "seq") >>> model.fit(data.x, data.y) PoissonRegression() >>> model.predict(data.x)[:4] array([0.41189011, 1.34910167, 0.28326399, 1.05768798]) >>> >>> # path_type="gs" >>> model = PoissonRegression(path_type="gs") >>> model.fit(data.x, data.y) PoissonRegression(path_type='gs') >>> model.predict(data.x)[:4] array([0.3824694 , 2.72926425, 0.14566451, 1.41221177]) """ def __init__(self, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, fit_intercept=True, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, important_search=128, screening_size=-1, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, approximate_Newton=False ): super().__init__( 
algorithm_type="abess", model_type="Poisson", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, fit_intercept=fit_intercept, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, thread=thread, approximate_Newton=approximate_Newton, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search, _estimator_type='regressor' ) def _more_tags(self): return {"poor_score": True} def predict(self, X): r""" Predict on given data. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- y : array-like, shape(n_samples,) Prediction of the mean on X. """ X = new_data_check(self, X) intercept_ = np.ones(X.shape[0]) * self.intercept_ xbeta_exp = np.exp(X.dot(self.coef_) + intercept_) return xbeta_exp def score(self, X, y, sample_weight=None): r""" Give new data, and it returns the :math:`D^2` score. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix. y : array-like, shape(n_samples, p_features) Real response for given X. sample_weight: array-like, shape(n_samples,), default=None Sample weights. Returns ------- score : float :math:`D^2` score. """ if sample_weight is None: sample_weight = np.ones(len(y)) X, y, sample_weight = new_data_check(self, X, y, sample_weight) # intercept_ = np.ones(X.shape[0]) * self.intercept_ # eta = X.dot(self.coef_) + intercept_ # exp_eta = np.exp(eta) # return (y * eta - exp_eta).sum() y_pred = self.predict(X) return d2_tweedie_score(y, y_pred, power=1, sample_weight=sample_weight) @ fix_docs class MultiTaskRegression(bess_base): r""" Adaptive Best-Subset Selection(ABESS) algorithm for multitasklearning. 
Parameters ---------- splicing_type: {0, 1}, optional, default=0 The type of splicing: "0" for decreasing by half, "1" for decresing by one. important_search : int, optional, default=128 The size of inactive set during updating active set when splicing. It should be a non-positive integer and if important_search=0, it would be set as the size of whole inactive set. Examples -------- Results may differ with different version of numpy. >>> ### Sparsity known >>> >>> from abess.linear import MultiTaskRegression >>> from abess.datasets import make_multivariate_glm_data >>> import numpy as np >>> np.random.seed(12345) >>> data = make_multivariate_glm_data( >>> n = 100, p = 50, k = 10, M = 3, family = 'multigaussian') >>> model = MultiTaskRegression(support_size = 10) >>> model.fit(data.x, data.y) MultiTaskRegression(support_size=10) >>> >>> model.predict(data.x)[:5, ] array([[ 14.8632471 , -3.50042308, 11.88954251], [ 9.50857154, -3.63397256, 17.24496971], [ 27.74599919, -28.29785667, -13.26021431], [ 13.58562727, -1.02215199, 5.06593256], [-29.18519221, 18.64600541, 15.44881672]]) >>> ### Sparsity unknown >>> >>> # path_type="seq" >>> model = MultiTaskRegression(path_type = "seq") >>> model.fit(data.x, data.y) MultiTaskRegression() >>> model.predict(data.x)[:5, ] array([[ 14.67257826, -4.2882759 , 12.050597 ], [ 8.99687125, -5.74834275, 17.67719359], [ 27.60141854, -28.89527087, -13.13808967], [ 13.63623637, -0.81303274, 5.02318398], [-28.48945127, 21.52084036, 14.86113707]]) >>> >>> # path_type="gs" >>> model = MultiTaskRegression(path_type="gs") >>> model.fit(data.x, data.y) MultiTaskRegression(path_type='gs') >>> model.predict(data.x)[:5, ] array([[ 14.67257826, -4.2882759 , 12.050597 ], [ 8.99687125, -5.74834275, 17.67719359], [ 27.60141854, -28.89527087, -13.13808967], [ 13.63623637, -0.81303274, 5.02318398], [-28.48945127, 21.52084036, 14.86113707]]) """ def __init__(self, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, 
fit_intercept=True, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, important_search=128, screening_size=-1, covariance_update=False, # primary_model_fit_max_iter=10, # primary_model_fit_epsilon=1e-8, # approximate_Newton=False ): super().__init__( algorithm_type="abess", model_type="Multigaussian", normalize_type=1, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, fit_intercept=fit_intercept, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, thread=thread, covariance_update=covariance_update, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search, _estimator_type='regressor' ) def _more_tags(self): return {'multioutput': True, 'multioutput_only': True} def predict(self, X): r""" Prediction of the mean of each response on given data. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- y : array-like, shape(n_samples, M_responses) Prediction of the mean of each response on given X. Each column indicates one response. """ X = new_data_check(self, X) intercept_ = np.repeat( self.intercept_[np.newaxis, ...], X.shape[0], axis=0) y_pred = X.dot(self.coef_) + intercept_ if len(y_pred.shape) == 1: y_pred = y_pred[:, np.newaxis] return y_pred def score(self, X, y, sample_weight=None): r""" Give data, and it returns the coefficient of determination. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix. y : array-like, shape(n_samples, M_responses) Real responses for given X. sample_weight: array-like, shape(n_samples,), default=None Sample weights. Returns ------- score : float :math:`R^2` score. 
""" if sample_weight is None: sample_weight = np.ones(len(y)) X, y, sample_weight = new_data_check(self, X, y, sample_weight) y_pred = self.predict(X) return r2_score(y, y_pred, sample_weight=sample_weight) @ fix_docs class MultinomialRegression(bess_base): r""" Adaptive Best-Subset Selection(ABESS) algorithm for multiclassification problem. Parameters ---------- splicing_type: {0, 1}, optional, default=0 The type of splicing: "0" for decreasing by half, "1" for decresing by one. important_search : int, optional, default=128 The size of inactive set during updating active set when splicing. It should be a non-positive integer and if important_search=0, it would be set as the size of whole inactive set. Examples -------- Results may differ with different version of numpy. >>> ### Sparsity known >>> >>> from abess.linear import MultinomialRegression >>> from abess.datasets import make_multivariate_glm_data >>> import numpy as np >>> np.random.seed(12345) >>> data = make_multivariate_glm_data( >>> n = 100, p = 50, k = 10, M = 3, family = 'multinomial') >>> model = MultinomialRegression(support_size = 10) >>> model.fit(data.x, data.y) MultinomialRegression(support_size=10) >>> model.predict(data.x)[:10, ] array([0, 2, 0, 0, 1, 1, 1, 1, 1, 0]) >>> ### Sparsity unknown >>> >>> # path_type="seq" >>> model = MultinomialRegression(path_type = "seq") >>> model.fit(data.x, data.y) MultinomialRegression() >>> model.predict(data.x)[:10, ] array([0, 2, 0, 0, 1, 1, 1, 1, 1, 0]) >>> >>> # path_type="gs" >>> model = MultinomialRegression(path_type="gs") >>> model.fit(data.x, data.y) MultinomialRegression(path_type='gs') >>> model.predict(data.x)[:10, ] array([0, 2, 0, 0, 1, 1, 1, 1, 1, 0]) """ def __init__(self, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, fit_intercept=True, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, 
important_search=128, screening_size=-1, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, # approximate_Newton=False ): super().__init__( algorithm_type="abess", model_type="Multinomial", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, fit_intercept=fit_intercept, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, approximate_Newton=True, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search, _estimator_type='classifier' ) def _more_tags(self): return {'multilabel': False, # 'multioutput_only': True, 'no_validation': True, 'poor_score': True} def predict_proba(self, X): r""" Give the probabilities of new data being assigned to different classes. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- proba : array-like, shape(n_samples, M_responses) Returns the probability of given samples for each class. Each column indicates one class. """ X = new_data_check(self, X) intercept_ = np.repeat( self.intercept_[np.newaxis, ...], X.shape[0], axis=0) xbeta = X.dot(self.coef_) + intercept_ eta = np.exp(xbeta) pr = np.zeros_like(xbeta) for i in range(X.shape[0]): pr[i, :] = eta[i, :] / np.sum(eta[i, :]) return pr def predict(self, X): r""" Return the most possible class for given data. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- y : array-like, shape(n_samples, ) Predicted class label for each sample in X. 
""" X = new_data_check(self, X) intercept_ = np.repeat( self.intercept_[np.newaxis, ...], X.shape[0], axis=0) xbeta = X.dot(self.coef_) + intercept_ max_item = np.argmax(xbeta, axis=1) # y_pred = np.zeros_like(xbeta) # for i in range(X.shape[0]): # y_pred[i, max_item[i]] = 1 cl = getattr(self, "classes_", np.arange(self.coef_.shape[1])) return cl[max_item] def score(self, X, y, sample_weight=None): """ Give new data, and it returns the prediction accuracy. Parameters ---------- X : array-like, shape(n_samples, p_features) Test data. y : array-like, shape(n_samples, M_responses) Test response (dummy variables of real class). sample_weight: array-like, shape(n_samples,), default=None Sample weights. Returns ------- score : float the mean prediction accuracy. """ if sample_weight is None: sample_weight = np.ones(len(y)) X, y, sample_weight = new_data_check(self, X, y, sample_weight) # if (len(y.shape) == 1 or y.shape[1] == 1): # y, _ = categorical_to_dummy(y.squeeze()) # pr = self.predict_proba(X) # return np.sum(y * np.log(pr)) y_true = np.zeros(X.shape[0]) if (len(y.shape) > 1 and y.shape[1] == self.coef_.shape[1]): # if given dummy y y_true = np.nonzero(y)[1] else: y_true = y y_pred = self.predict(X) return accuracy_score(y_true, y_pred, sample_weight=sample_weight) @ fix_docs class GammaRegression(bess_base): r""" Adaptive Best-Subset Selection(ABESS) algorithm for Gamma regression. Parameters ---------- splicing_type: {0, 1}, optional, default=0 The type of splicing: "0" for decreasing by half, "1" for decresing by one. important_search : int, optional, default=128 The size of inactive set during updating active set when splicing. It should be a non-positive integer and if important_search=0, it would be set as the size of whole inactive set. Examples -------- Results may differ with different version of numpy. 
>>> ### Sparsity known >>> >>> from abess.linear import GammaRegression >>> from abess.datasets import make_glm_data >>> import numpy as np >>> np.random.seed(12345) >>> data = make_glm_data(n = 100, p = 50, k = 10, family = 'gamma') >>> model = GammaRegression(support_size = 10) >>> model.fit(data.x, data.y) GammaRegression(support_size=10) >>> model.predict(data.x)[:4] array([0.01295776, 0.01548078, 0.01221642, 0.01623115]) >>> ### Sparsity unknown >>> >>> # path_type="seq" >>> model = GammaRegression(path_type = "seq") >>> model.fit(data.x, data.y) GammaRegression() >>> model.predict(data.x)[:4] array([0.01779091, 0.01779091, 0.01779091, 0.01779091]) >>> >>> # path_type="gs" >>> model = GammaRegression(path_type="gs") >>> model.fit(data.x, data.y) GammaRegression(path_type='gs') >>> model.predict(data.x)[:4] array([0.01779091, 0.01779091, 0.01779091, 0.01779091]) """ def __init__(self, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, fit_intercept=True, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, important_search=128, screening_size=-1, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, approximate_Newton=False ): super().__init__( algorithm_type="abess", model_type="Gamma", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, fit_intercept=fit_intercept, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, thread=thread, approximate_Newton=approximate_Newton, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search, _estimator_type='regressor' ) def 
_more_tags(self): return {'poor_score': True, 'requires_positive_y': True} def predict(self, X): r""" Predict on given data. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- y : array-like, shape(n_samples,) Prediction of the mean on given X. """ X = new_data_check(self, X) intercept_ = np.ones(X.shape[0]) * self.intercept_ xbeta_exp = - 1 / (X.dot(self.coef_) + intercept_) return xbeta_exp def score(self, X, y, sample_weight=None): r""" Give new data, and it returns the prediction error. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix. y : array-like, shape(n_samples, p_features) Real response for given X. sample_weight: array-like, shape(n_samples,), default=None Sample weights. Returns ------- score : float Prediction error. """ # if weights is None: # X = np.array(X) # weights = np.ones(X.shape[0]) # X, y, weights = new_data_check(self, X, y, weights) # def deviance(y, y_pred): # dev = 2 * (np.log(y_pred / y) + y / y_pred - 1) # return np.sum(weights * dev) # y_pred = self.predict(X) # y_mean = np.average(y, weights=weights) # dev = deviance(y, y_pred) # dev_null = deviance(y, y_mean) # return 1 - dev / dev_null if sample_weight is None: sample_weight = np.ones(len(y)) X, y, sample_weight = new_data_check(self, X, y, sample_weight) y_pred = self.predict(X) return d2_tweedie_score(y, y_pred, power=2, sample_weight=sample_weight) @ fix_docs class OrdinalRegression(bess_base): r""" Adaptive Best-Subset Selection(ABESS) algorithm for ordinal regression problem. Parameters ---------- splicing_type: {0, 1}, optional, default=0 The type of splicing: "0" for decreasing by half, "1" for decresing by one. important_search : int, optional, default=128 The size of inactive set during updating active set when splicing. It should be a non-positive integer and if important_search=0, it would be set as the size of whole inactive set. 
Examples -------- Results may differ with different version of numpy. >>> ### Sparsity known >>> >>> from abess.linear import OrdinalRegression >>> from abess.datasets import make_glm_data >>> import numpy as np >>> np.random.seed(12345) >>> data = make_glm_data(n = 1000, p = 50, k = 10, family = 'ordinal') >>> model = OrdinalRegression(support_size = 10) >>> model.fit(data.x, data.y) OrdinalRegression(support_size=10) >>> model.predict(data.x)[:10] array([2, 1, 1, 1, 2, 0, 2, 1, 2, 1]) >>> ### Sparsity unknown >>> >>> # path_type="seq" >>> model = OrdinalRegression(path_type = "seq") >>> model.fit(data.x, data.y) OrdinalRegression() >>> model.predict(data.x)[:10] array([2, 1, 1, 1, 2, 0, 2, 1, 2, 1]) >>> >>> # path_type="gs" >>> model = OrdinalRegression(path_type="gs") >>> model.fit(data.x, data.y) OrdinalRegression(path_type='gs') >>> model.predict(data.x)[:10] array([2, 1, 1, 1, 2, 0, 2, 1, 2, 1]) """ def __init__(self, path_type="seq", support_size=None, s_min=None, s_max=None, group=None, alpha=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", thread=1, A_init=None, always_select=None, max_iter=20, exchange_num=5, is_warm_start=True, splicing_type=0, important_search=128, screening_size=-1, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, approximate_Newton=False ): super().__init__( algorithm_type="abess", model_type="Ordinal", normalize_type=2, path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, approximate_Newton=approximate_Newton, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search, # _estimator_type="regressor" ) def predict_proba(self, X): 
r""" Give the probabilities of new sample being assigned to different classes. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- proba : array-like, shape(n_samples, M_classes) Returns the probabilities for each class on given X. """ X = new_data_check(self, X) M = len(self.intercept_) cdf = (X @ self.coef_)[:, np.newaxis] + self.intercept_ cdf = 1 / (1 + np.exp(-cdf)) proba = np.zeros_like(cdf) proba[:, 0] = cdf[:, 0] proba[:, 1:(M - 1)] = cdf[:, 1:(M - 1)] - cdf[:, 0:(M - 2)] proba[:, M - 1] = 1 - cdf[:, M - 1] return proba def predict(self, X): r""" Return the most possible class label (start from 0) for given data. Parameters ---------- X : array-like, shape(n_samples, p_features) Sample matrix to be predicted. Returns ------- y : array-like, shape(n_samples,) Predict class labels for samples in X. """ proba = self.predict_proba(X) return np.argmax(proba, axis=1) def score(self, X, y, k=None, sample_weight=None, ignore_ties=False): """ Give new data, and it returns normalized discounted cumulative gain. Parameters ---------- X : array-like, shape(n_samples, p_features) Test data. y : array-like, shape(n_samples, ) Test response (class labels for samples in X). k : int, default=None Only consider the highest k scores in the ranking. If None, use all outputs. sample_weight: array-like, shape(n_samples,), default=None Sample weights. ignore_ties : bool, default=False Assume that there are no ties in y_pred (which is likely to be the case if y_score is continuous) for efficiency gains. 
Returns ------- score : float normalized discounted cumulative gain """ if sample_weight is None: sample_weight = np.ones(len(y)) X, y, sample_weight = new_data_check(self, X, y, sample_weight) unique_ = np.unique(y) class_num = len(unique_) for i in range(class_num): y[y == unique_[i]] = i y_true = class_num - 1 - abs(np.tile(np.arange(len(unique_)), (len(y), 1)) - y[..., np.newaxis]) y_score = self.predict_proba(X) ndcg = ndcg_score(y_true, y_score, k=k, sample_weight=sample_weight, ignore_ties=ignore_ties) return ndcg class abessLogistic(LogisticRegression): warning_msg = ("Class ``abessLogistic`` has been renamed to " "``LogisticRegression``. " "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + LogisticRegression.__doc__ def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="roc_auc", screening_size=-1, always_select=None, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, approximate_Newton=False, thread=1, A_init=None, group=None, splicing_type=0, important_search=128, ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, approximate_Newton=approximate_Newton, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search ) class abessLm(LinearRegression): warning_msg = ("Class ``abessLm`` has been renamed to" " ``LinearRegression``. 
" "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + LinearRegression.__doc__ def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", screening_size=-1, always_select=None, thread=1, covariance_update=False, A_init=None, group=None, splicing_type=0, important_search=128, # primary_model_fit_max_iter=10, # primary_model_fit_epsilon=1e-8, approximate_Newton=False ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, thread=thread, covariance_update=covariance_update, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search ) class abessCox(CoxPHSurvivalAnalysis): warning_msg = ("Class ``abessCox`` has been renamed to " "``CoxPHSurvivalAnalysis``. 
" "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + CoxPHSurvivalAnalysis.__doc__ def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", screening_size=-1, always_select=None, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, approximate_Newton=False, thread=1, A_init=None, group=None, splicing_type=0, important_search=128 ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, approximate_Newton=approximate_Newton, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search ) class abessPoisson(PoissonRegression): warning_msg = ("Class ``abessPoisson`` has been renamed to " "``PoissonRegression``. 
" "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + PoissonRegression.__doc__ def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", screening_size=-1, always_select=None, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, thread=1, A_init=None, group=None, splicing_type=0, important_search=128 ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search ) class abessMultigaussian(MultiTaskRegression): warning_msg = ("Class ``abessMultigaussian`` has been renamed to " "``MultiTaskRegression``. 
" "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + MultiTaskRegression.__doc__ def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", screening_size=-1, always_select=None, thread=1, covariance_update=False, A_init=None, group=None, splicing_type=0, important_search=128 ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, thread=thread, covariance_update=covariance_update, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search ) class abessMultinomial(MultinomialRegression): warning_msg = ("Class ``abessMultinomial`` has been renamed to " "``MultinomialRegression``. 
" "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + MultinomialRegression.__doc__ def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", screening_size=-1, always_select=None, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, # approximate_Newton=False, thread=1, A_init=None, group=None, splicing_type=0, important_search=128 ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, # approximate_Newton=approximate_Newton, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search ) class abessGamma(GammaRegression): warning_msg = ("Class ``abessGamma`` has been renamed to " "``GammaRegression``. 
" "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + GammaRegression.__doc__ def __init__(self, max_iter=20, exchange_num=5, path_type="seq", is_warm_start=True, support_size=None, alpha=None, s_min=None, s_max=None, ic_type="ebic", ic_coef=1.0, cv=1, cv_score="test_loss", screening_size=-1, always_select=None, primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8, thread=1, A_init=None, group=None, splicing_type=0, important_search=128 ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( path_type=path_type, max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, alpha=alpha, s_min=s_min, s_max=s_max, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, primary_model_fit_max_iter=primary_model_fit_max_iter, primary_model_fit_epsilon=primary_model_fit_epsilon, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type, important_search=important_search )
54,782
36.166214
79
py
abess
abess-master/python/abess/pca.py
import warnings from .decomposition import SparsePCA, RobustPCA # This is the old API for `abess.decomposition` # and will be removed in version 0.6.0. class abessPCA(SparsePCA): warning_msg = ("Class ``abess.pca.abessPCA`` has been renamed to " "``abess.decomposition.SparsePCA``. " "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + SparsePCA.__doc__ def __init__(self, max_iter=20, exchange_num=5, is_warm_start=True, support_size=None, ic_type="loss", ic_coef=1.0, cv=1, cv_score="test_loss", screening_size=-1, always_select=None, thread=1, A_init=None, group=None, splicing_type=1 ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, ic_type=ic_type, ic_coef=ic_coef, cv=cv, cv_score=cv_score, screening_size=screening_size, always_select=always_select, thread=thread, A_init=A_init, group=group, splicing_type=splicing_type ) class abessRPCA(RobustPCA): warning_msg = ("Class ``abess.pca.abessRPCA`` has been renamed to " "``abess.decomposition.RobustPCA``. " "The former will be deprecated in version 0.6.0.") __doc__ = warning_msg + '\n' + RobustPCA.__doc__ def __init__(self, max_iter=20, exchange_num=5, is_warm_start=True, support_size=None, ic_type="gic", ic_coef=1.0, always_select=None, thread=1, A_init=None, splicing_type=1 ): warnings.warn(self.warning_msg, FutureWarning) super().__init__( max_iter=max_iter, exchange_num=exchange_num, is_warm_start=is_warm_start, support_size=support_size, ic_type=ic_type, ic_coef=ic_coef, always_select=always_select, thread=thread, A_init=A_init, splicing_type=splicing_type )
2,313
35.730159
73
py
abess
abess-master/python/abess/utilities.py
import numpy as np from sklearn.utils.validation import check_X_y, check_array, check_is_fitted def fix_docs(cls): """ This function is to inherit the docstring from base class and avoid unnecessary duplications on description. """ title_index = cls.__doc__.find("Parameters\n ----------") more_para_index = cls.__doc__.find("Examples\n --------") base_para_index = cls.__bases__[0].__doc__.find( "Attributes\n ----------") if title_index == -1: title_index = 0 if more_para_index == -1: more_para_index = len(cls.__doc__) - 1 # class title full_doc = cls.__doc__[:title_index] # class paras full_doc = (full_doc + cls.__bases__[0].__doc__[:base_para_index] + cls.__doc__[title_index:more_para_index]) # more info full_doc = (full_doc + cls.__doc__[more_para_index:] + cls.__bases__[0].__doc__[base_para_index:]) cls.__doc__ = full_doc return cls def new_data_check(self, X, y=None, weights=None): """ Check new data for predicting, scoring or else. """ # Check1 : whether fit had been called check_is_fitted(self) # Check2 : X validation X = check_array(X, accept_sparse=True) if X.shape[1] != self.n_features_in_: raise ValueError("X.shape[1] should be " + str(self.n_features_in_)) # Check3 : X, y validation if (y is not None) and (weights is None): X, y = check_X_y(X, y, accept_sparse=True, multi_output=True, y_numeric=True) return X, y # Check4: X, y, weights validation if weights is not None: X, y = check_X_y(X, y, accept_sparse=True, multi_output=True, y_numeric=True) weights = np.array(weights, dtype=float) if len(weights.shape) != 1: raise ValueError("weights should be 1-dimension.") if weights.shape[0] != X.shape[0]: raise ValueError("weights should have a length of X.shape[0].") return X, y, weights return X def categorical_to_dummy(x, classes=None): """ Transfer categorical variable into dummy variable. Parameters ---------- x: array-like, shape(n,) Data of the categorical variable. 
classes: array-like, shape(M,), optional, default=numpy.unique(x) All possible classes in x. If not given, it would be set as numpy.unique(x). Returns ------- dummy_x: array-like, shape(n, M) The transfered dummy data. """ if not classes: classes = np.unique(x) # print("classes: {}".format(classes)) if x.shape == (): x = np.array([x]) n = len(x) M = len(classes) index = dict(zip(classes, np.arange(M))) dummy_x = np.zeros((n, M), dtype=float) for i, x_i in enumerate(x): if x_i in classes: dummy_x[i, index[x_i]] = 1 # else: # print( # "Data {} (index {}) is not in classes.".format( # x_i, # i)) return dummy_x, classes
3,296
29.813084
76
py
abess
abess-master/python/include/Eigen/src/Cholesky/LDLT.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2009 Keir Mierle <mierle@gmail.com> // Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com > // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LDLT_H #define EIGEN_LDLT_H namespace Eigen { namespace internal { template<typename MatrixType, int UpLo> struct LDLT_Traits; // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite }; } /** \ingroup Cholesky_Module * * \class LDLT * * \brief Robust Cholesky decomposition of a matrix with pivoting * * \tparam _MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition * \tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. * The other triangular part won't be read. * * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L * is lower triangular with a unit diagonal and D is a diagonal matrix. * * The decomposition uses pivoting to ensure stability, so that L will have * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root * on D also stabilizes the computation. * * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky * decomposition to determine whether a system of equations has a solution. * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. 
* * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT */ template<typename _MatrixType, int _UpLo> class LDLT { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename MatrixType::StorageIndex StorageIndex; typedef Matrix<Scalar, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime, 1> TmpMatrixType; typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType; typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType; typedef internal::LDLT_Traits<MatrixType,UpLo> Traits; /** \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LDLT::compute(const MatrixType&). */ LDLT() : m_matrix(), m_transpositions(), m_sign(internal::ZeroSign), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa LDLT() */ explicit LDLT(Index size) : m_matrix(size, size), m_transpositions(size), m_temporary(size), m_sign(internal::ZeroSign), m_isInitialized(false) {} /** \brief Constructor with decomposition * * This calculates the decomposition for the input \a matrix. 
* * \sa LDLT(Index size) */ template<typename InputType> explicit LDLT(const EigenBase<InputType>& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_transpositions(matrix.rows()), m_temporary(matrix.rows()), m_sign(internal::ZeroSign), m_isInitialized(false) { compute(matrix.derived()); } /** \brief Constructs a LDLT factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref. * * \sa LDLT(const EigenBase&) */ template<typename InputType> explicit LDLT(EigenBase<InputType>& matrix) : m_matrix(matrix.derived()), m_transpositions(matrix.rows()), m_temporary(matrix.rows()), m_sign(internal::ZeroSign), m_isInitialized(false) { compute(matrix.derived()); } /** Clear any existing decomposition * \sa rankUpdate(w,sigma) */ void setZero() { m_isInitialized = false; } /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return Traits::getL(m_matrix); } /** \returns the permutation matrix P as a transposition sequence. 
*/ inline const TranspositionType& transpositionsP() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_transpositions; } /** \returns the coefficients of the diagonal matrix D */ inline Diagonal<const MatrixType> vectorD() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix.diagonal(); } /** \returns true if the matrix is positive (semidefinite) */ inline bool isPositive() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign; } /** \returns true if the matrix is negative (semidefinite) */ inline bool isNegative(void) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign; } /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A. * * This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> . * * \note_about_checking_solutions * * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$ * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function * computes the least-square solution of \f$ A x = b \f$ is \f$ A \f$ is singular. 
* * \sa MatrixBase::ldlt(), SelfAdjointView::ldlt() */ template<typename Rhs> inline const Solve<LDLT, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); eigen_assert(m_matrix.rows()==b.rows() && "LDLT::solve(): invalid number of rows of the right hand side matrix b"); return Solve<LDLT, Rhs>(*this, b.derived()); } template<typename Derived> bool solveInPlace(MatrixBase<Derived> &bAndX) const; template<typename InputType> LDLT& compute(const EigenBase<InputType>& matrix); /** \returns an estimate of the reciprocal condition number of the matrix of * which \c *this is the LDLT decomposition. */ RealScalar rcond() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return internal::rcond_estimate_helper(m_l1_norm, *this); } template <typename Derived> LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1); /** \returns the internal LDLT decomposition matrix * * TODO: document the storage layout */ inline const MatrixType& matrixLDLT() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_matrix; } MatrixType reconstructedMatrix() const; /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint. * * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: * \code x = decomposition.adjoint().solve(b) \endcode */ const LDLT& adjoint() const { return *this; }; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. 
*/ ComputationInfo info() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); return m_info; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC void _solve_impl(const RhsType &rhs, DstType &dst) const; #endif protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } /** \internal * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U. * The strict upper part is used during the decomposition, the strict lower * part correspond to the coefficients of L (its diagonal is equal to 1 and * is not stored), and the diagonal entries correspond to D. */ MatrixType m_matrix; RealScalar m_l1_norm; TranspositionType m_transpositions; TmpMatrixType m_temporary; internal::SignMatrix m_sign; bool m_isInitialized; ComputationInfo m_info; }; namespace internal { template<int UpLo> struct ldlt_inplace; template<> struct ldlt_inplace<Lower> { template<typename MatrixType, typename TranspositionType, typename Workspace> static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { using std::abs; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename TranspositionType::StorageIndex IndexType; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); bool found_zero_pivot = false; bool ret = true; if (size <= 1) { transpositions.setIdentity(); if (numext::real(mat.coeff(0,0)) > static_cast<RealScalar>(0) ) sign = PositiveSemiDef; else if (numext::real(mat.coeff(0,0)) < static_cast<RealScalar>(0)) sign = NegativeSemiDef; else sign = ZeroSign; return true; } for (Index k = 0; k < size; ++k) { // Find largest diagonal element Index index_of_biggest_in_corner; mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); index_of_biggest_in_corner += k; transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner); if(k != 
index_of_biggest_in_corner) { // apply the transposition while taking care to consider only // the lower triangular part Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k)); mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s)); std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner)); for(Index i=k+1;i<index_of_biggest_in_corner;++i) { Scalar tmp = mat.coeffRef(i,k); mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i)); mat.coeffRef(index_of_biggest_in_corner,i) = numext::conj(tmp); } if(NumTraits<Scalar>::IsComplex) mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k)); } // partition the matrix: // A00 | - | - // lu = A10 | A11 | - // A20 | A21 | A22 Index rs = size - k - 1; Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1); Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k); Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k); if(k>0) { temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint(); mat.coeffRef(k,k) -= (A10 * temp.head(k)).value(); if(rs>0) A21.noalias() -= A20 * temp.head(k); } // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot // was smaller than the cutoff value. However, since LDLT is not rank-revealing // we should only make sure that we do not introduce INF or NaN values. // Remark that LAPACK also uses 0 as the cutoff value. RealScalar realAkk = numext::real(mat.coeffRef(k,k)); bool pivot_is_valid = (abs(realAkk) > RealScalar(0)); if(k==0 && !pivot_is_valid) { // The entire diagonal is zero, there is nothing more to do // except filling the transpositions, and checking whether the matrix is zero. 
sign = ZeroSign; for(Index j = 0; j<size; ++j) { transpositions.coeffRef(j) = IndexType(j); ret = ret && (mat.col(j).tail(size-j-1).array()==Scalar(0)).all(); } return ret; } if((rs>0) && pivot_is_valid) A21 /= realAkk; if(found_zero_pivot && pivot_is_valid) ret = false; // factorization failed else if(!pivot_is_valid) found_zero_pivot = true; if (sign == PositiveSemiDef) { if (realAkk < static_cast<RealScalar>(0)) sign = Indefinite; } else if (sign == NegativeSemiDef) { if (realAkk > static_cast<RealScalar>(0)) sign = Indefinite; } else if (sign == ZeroSign) { if (realAkk > static_cast<RealScalar>(0)) sign = PositiveSemiDef; else if (realAkk < static_cast<RealScalar>(0)) sign = NegativeSemiDef; } } return ret; } // Reference for the algorithm: Davis and Hager, "Multiple Rank // Modifications of a Sparse Cholesky Factorization" (Algorithm 1) // Trivial rearrangements of their computations (Timothy E. Holy) // allow their algorithm to work for rank-1 updates even if the // original matrix is not of full rank. 
// Here only rank-1 updates are implemented, to reduce the // requirement for intermediate storage and improve accuracy template<typename MatrixType, typename WDerived> static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, const typename MatrixType::RealScalar& sigma=1) { using numext::isfinite; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; const Index size = mat.rows(); eigen_assert(mat.cols() == size && w.size()==size); RealScalar alpha = 1; // Apply the update for (Index j = 0; j < size; j++) { // Check for termination due to an original decomposition of low-rank if (!(isfinite)(alpha)) break; // Update the diagonal terms RealScalar dj = numext::real(mat.coeff(j,j)); Scalar wj = w.coeff(j); RealScalar swj2 = sigma*numext::abs2(wj); RealScalar gamma = dj*alpha + swj2; mat.coeffRef(j,j) += swj2/alpha; alpha += swj2/dj; // Update the terms of L Index rs = size-j-1; w.tail(rs) -= wj * mat.col(j).tail(rs); if(gamma != 0) mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs); } return true; } template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType> static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1) { // Apply the permutation to the input w tmp = transpositions * w; return ldlt_inplace<Lower>::updateInPlace(mat,tmp,sigma); } }; template<> struct ldlt_inplace<Upper> { template<typename MatrixType, typename TranspositionType, typename Workspace> static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign) { Transpose<MatrixType> matt(mat); return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign); } template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType> static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& 
transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1) { Transpose<MatrixType> matt(mat); return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma); } }; template<typename MatrixType> struct LDLT_Traits<MatrixType,Lower> { typedef const TriangularView<const MatrixType, UnitLower> MatrixL; typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitUpper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } }; template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper> { typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitLower> MatrixL; typedef const TriangularView<const MatrixType, UnitUpper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } }; } // end namespace internal /** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix */ template<typename MatrixType, int _UpLo> template<typename InputType> LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a) { check_template_parameters(); eigen_assert(a.rows()==a.cols()); const Index size = a.rows(); m_matrix = a.derived(); // Compute matrix L1 norm = max abs column sum. 
m_l1_norm = RealScalar(0); // TODO move this code to SelfAdjointView for (Index col = 0; col < size; ++col) { RealScalar abs_col_sum; if (_UpLo == Lower) abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>(); else abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum; } m_transpositions.resize(size); m_isInitialized = false; m_temporary.resize(size); m_sign = internal::ZeroSign; m_info = internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign) ? Success : NumericalIssue; m_isInitialized = true; return *this; } /** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T. * \param w a vector to be incorporated into the decomposition. * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1. * \sa setZero() */ template<typename MatrixType, int _UpLo> template<typename Derived> LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma) { typedef typename TranspositionType::StorageIndex IndexType; const Index size = w.rows(); if (m_isInitialized) { eigen_assert(m_matrix.rows()==size); } else { m_matrix.resize(size,size); m_matrix.setZero(); m_transpositions.resize(size); for (Index i = 0; i < size; i++) m_transpositions.coeffRef(i) = IndexType(i); m_temporary.resize(size); m_sign = sigma>=0 ? 
internal::PositiveSemiDef : internal::NegativeSemiDef; m_isInitialized = true; } internal::ldlt_inplace<UpLo>::update(m_matrix, m_transpositions, m_temporary, w, sigma); return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename _MatrixType, int _UpLo> template<typename RhsType, typename DstType> void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const { eigen_assert(rhs.rows() == rows()); // dst = P b dst = m_transpositions * rhs; // dst = L^-1 (P b) matrixL().solveInPlace(dst); // dst = D^-1 (L^-1 P b) // more precisely, use pseudo-inverse of D (see bug 241) using std::abs; const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD()); // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon // as motivated by LAPACK's xGELSS: // RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest()); // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest // diagonal element is not well justified and leads to numerical issues in some cases. // Moreover, Lapack's xSYTRS routines use 0 for the tolerance. RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest(); for (Index i = 0; i < vecD.size(); ++i) { if(abs(vecD(i)) > tolerance) dst.row(i) /= vecD(i); else dst.row(i).setZero(); } // dst = L^-T (D^-1 L^-1 P b) matrixU().solveInPlace(dst); // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b dst = m_transpositions.transpose() * dst; } #endif /** \internal use x = ldlt_object.solve(x); * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. * * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. * * This version avoids a copy when the right hand side matrix b is not * needed anymore. 
* * \sa LDLT::solve(), MatrixBase::ldlt() */ template<typename MatrixType,int _UpLo> template<typename Derived> bool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const { eigen_assert(m_isInitialized && "LDLT is not initialized."); eigen_assert(m_matrix.rows() == bAndX.rows()); bAndX = this->solve(bAndX); return true; } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: P^T L D L^* P. * This function is provided for debug purpose. */ template<typename MatrixType, int _UpLo> MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); const Index size = m_matrix.rows(); MatrixType res(size,size); // P res.setIdentity(); res = transpositionsP() * res; // L^* P res = matrixU() * res; // D(L^*P) res = vectorD().real().asDiagonal() * res; // L(DL^*P) res = matrixL() * res; // P^T (LDL^*P) res = transpositionsP().transpose() * res; return res; } /** \cholesky_module * \returns the Cholesky decomposition with full pivoting without square root of \c *this * \sa MatrixBase::ldlt() */ template<typename MatrixType, unsigned int UpLo> inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo> SelfAdjointView<MatrixType, UpLo>::ldlt() const { return LDLT<PlainObject,UpLo>(m_matrix); } /** \cholesky_module * \returns the Cholesky decomposition with full pivoting without square root of \c *this * \sa SelfAdjointView::ldlt() */ template<typename Derived> inline const LDLT<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::ldlt() const { return LDLT<PlainObject>(derived()); } } // end namespace Eigen #endif // EIGEN_LDLT_H
24,254
35.201493
166
h
abess
abess-master/python/include/Eigen/src/Cholesky/LLT.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LLT_H #define EIGEN_LLT_H namespace Eigen { namespace internal{ template<typename MatrixType, int UpLo> struct LLT_Traits; } /** \ingroup Cholesky_Module * * \class LLT * * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features * * \tparam _MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition * \tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. * The other triangular part won't be read. * * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite * matrix A such that A = LL^* = U^*U, where L is lower triangular. * * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b, * for that purpose, we recommend the Cholesky decomposition without square root which is more stable * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other * situations like generalised eigen problems with hermitian matrices. * * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices, * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations * has a solution. * * Example: \include LLT_example.cpp * Output: \verbinclude LLT_example.out * * This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism. 
* * \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT */ /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH) * Note that during the decomposition, only the upper triangular part of A is considered. Therefore, * the strict lower part does not have to store correct values. */ template<typename _MatrixType, int _UpLo> class LLT { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef typename MatrixType::StorageIndex StorageIndex; enum { PacketSize = internal::packet_traits<Scalar>::size, AlignmentMask = int(PacketSize)-1, UpLo = _UpLo }; typedef internal::LLT_Traits<MatrixType,UpLo> Traits; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via LLT::compute(const MatrixType&). */ LLT() : m_matrix(), m_isInitialized(false) {} /** \brief Default Constructor with memory preallocation * * Like the default constructor but with preallocation of the internal data * according to the specified problem \a size. * \sa LLT() */ explicit LLT(Index size) : m_matrix(size, size), m_isInitialized(false) {} template<typename InputType> explicit LLT(const EigenBase<InputType>& matrix) : m_matrix(matrix.rows(), matrix.cols()), m_isInitialized(false) { compute(matrix.derived()); } /** \brief Constructs a LDLT factorization from a given matrix * * This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when * \c MatrixType is a Eigen::Ref. 
* * \sa LLT(const EigenBase&) */ template<typename InputType> explicit LLT(EigenBase<InputType>& matrix) : m_matrix(matrix.derived()), m_isInitialized(false) { compute(matrix.derived()); } /** \returns a view of the upper triangular matrix U */ inline typename Traits::MatrixU matrixU() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getU(m_matrix); } /** \returns a view of the lower triangular matrix L */ inline typename Traits::MatrixL matrixL() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return Traits::getL(m_matrix); } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. * * Since this LLT class assumes anyway that the matrix A is invertible, the solution * theoretically exists and is unique regardless of b. * * Example: \include LLT_solve.cpp * Output: \verbinclude LLT_solve.out * * \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt() */ template<typename Rhs> inline const Solve<LLT, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_matrix.rows()==b.rows() && "LLT::solve(): invalid number of rows of the right hand side matrix b"); return Solve<LLT, Rhs>(*this, b.derived()); } template<typename Derived> void solveInPlace(MatrixBase<Derived> &bAndX) const; template<typename InputType> LLT& compute(const EigenBase<InputType>& matrix); /** \returns an estimate of the reciprocal condition number of the matrix of * which \c *this is the Cholesky decomposition. 
*/ RealScalar rcond() const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative"); return internal::rcond_estimate_helper(m_l1_norm, *this); } /** \returns the LLT decomposition matrix * * TODO: document the storage layout */ inline const MatrixType& matrixLLT() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return m_matrix; } MatrixType reconstructedMatrix() const; /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return m_info; } /** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint. * * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as: * \code x = decomposition.adjoint().solve(b) \endcode */ const LLT& adjoint() const { return *this; }; inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } template<typename VectorType> LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC void _solve_impl(const RhsType &rhs, DstType &dst) const; #endif protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar); } /** \internal * Used to compute and store L * The strict upper part is not used and even not initialized. 
*/ MatrixType m_matrix; RealScalar m_l1_norm; bool m_isInitialized; ComputationInfo m_info; }; namespace internal { template<typename Scalar, int UpLo> struct llt_inplace; template<typename MatrixType, typename VectorType> static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) { using std::sqrt; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::ColXpr ColXpr; typedef typename internal::remove_all<ColXpr>::type ColXprCleaned; typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; typedef Matrix<Scalar,Dynamic,1> TempVectorType; typedef typename TempVectorType::SegmentReturnType TempVecSegment; Index n = mat.cols(); eigen_assert(mat.rows()==n && vec.size()==n); TempVectorType temp; if(sigma>0) { // This version is based on Givens rotations. // It is faster than the other one below, but only works for updates, // i.e., for sigma > 0 temp = sqrt(sigma) * vec; for(Index i=0; i<n; ++i) { JacobiRotation<Scalar> g; g.makeGivens(mat(i,i), -temp(i), &mat(i,i)); Index rs = n-i-1; if(rs>0) { ColXprSegment x(mat.col(i).tail(rs)); TempVecSegment y(temp.tail(rs)); apply_rotation_in_the_plane(x, y, g); } } } else { temp = vec; RealScalar beta = 1; for(Index j=0; j<n; ++j) { RealScalar Ljj = numext::real(mat.coeff(j,j)); RealScalar dj = numext::abs2(Ljj); Scalar wj = temp.coeff(j); RealScalar swj2 = sigma*numext::abs2(wj); RealScalar gamma = dj*beta + swj2; RealScalar x = dj + swj2/beta; if (x<=RealScalar(0)) return j; RealScalar nLjj = sqrt(x); mat.coeffRef(j,j) = nLjj; beta += swj2/dj; // Update the terms of L Index rs = n-j-1; if(rs) { temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs); if(gamma != 0) mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs); } } } return -1; } template<typename Scalar> struct llt_inplace<Scalar, Lower> { typedef typename NumTraits<Scalar>::Real 
RealScalar; template<typename MatrixType> static Index unblocked(MatrixType& mat) { using std::sqrt; eigen_assert(mat.rows()==mat.cols()); const Index size = mat.rows(); for(Index k = 0; k < size; ++k) { Index rs = size-k-1; // remaining size Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1); Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k); Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k); RealScalar x = numext::real(mat.coeff(k,k)); if (k>0) x -= A10.squaredNorm(); if (x<=RealScalar(0)) return k; mat.coeffRef(k,k) = x = sqrt(x); if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint(); if (rs>0) A21 /= x; } return -1; } template<typename MatrixType> static Index blocked(MatrixType& m) { eigen_assert(m.rows()==m.cols()); Index size = m.rows(); if(size<32) return unblocked(m); Index blockSize = size/8; blockSize = (blockSize/16)*16; blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128)); for (Index k=0; k<size; k+=blockSize) { // partition the matrix: // A00 | - | - // lu = A10 | A11 | - // A20 | A21 | A22 Index bs = (std::min)(blockSize, size-k); Index rs = size - k - bs; Block<MatrixType,Dynamic,Dynamic> A11(m,k, k, bs,bs); Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k, rs,bs); Block<MatrixType,Dynamic,Dynamic> A22(m,k+bs,k+bs,rs,rs); Index ret; if((ret=unblocked(A11))>=0) return k+ret; if(rs>0) A11.adjoint().template triangularView<Upper>().template solveInPlace<OnTheRight>(A21); if(rs>0) A22.template selfadjointView<Lower>().rankUpdate(A21,typename NumTraits<RealScalar>::Literal(-1)); // bottleneck } return -1; } template<typename MatrixType, typename VectorType> static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } }; template<typename Scalar> struct llt_inplace<Scalar, Upper> { typedef typename NumTraits<Scalar>::Real RealScalar; template<typename MatrixType> static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat) { Transpose<MatrixType> 
matt(mat); return llt_inplace<Scalar, Lower>::unblocked(matt); } template<typename MatrixType> static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat) { Transpose<MatrixType> matt(mat); return llt_inplace<Scalar, Lower>::blocked(matt); } template<typename MatrixType, typename VectorType> static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) { Transpose<MatrixType> matt(mat); return llt_inplace<Scalar, Lower>::rankUpdate(matt, vec.conjugate(), sigma); } }; template<typename MatrixType> struct LLT_Traits<MatrixType,Lower> { typedef const TriangularView<const MatrixType, Lower> MatrixL; typedef const TriangularView<const typename MatrixType::AdjointReturnType, Upper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); } static bool inplace_decomposition(MatrixType& m) { return llt_inplace<typename MatrixType::Scalar, Lower>::blocked(m)==-1; } }; template<typename MatrixType> struct LLT_Traits<MatrixType,Upper> { typedef const TriangularView<const typename MatrixType::AdjointReturnType, Lower> MatrixL; typedef const TriangularView<const MatrixType, Upper> MatrixU; static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); } static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); } static bool inplace_decomposition(MatrixType& m) { return llt_inplace<typename MatrixType::Scalar, Upper>::blocked(m)==-1; } }; } // end namespace internal /** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix * * \returns a reference to *this * * Example: \include TutorialLinAlgComputeTwice.cpp * Output: \verbinclude TutorialLinAlgComputeTwice.out */ template<typename MatrixType, int _UpLo> template<typename InputType> LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a) { check_template_parameters(); eigen_assert(a.rows()==a.cols()); const Index size = 
a.rows(); m_matrix.resize(size, size); m_matrix = a.derived(); // Compute matrix L1 norm = max abs column sum. m_l1_norm = RealScalar(0); // TODO move this code to SelfAdjointView for (Index col = 0; col < size; ++col) { RealScalar abs_col_sum; if (_UpLo == Lower) abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>(); else abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>(); if (abs_col_sum > m_l1_norm) m_l1_norm = abs_col_sum; } m_isInitialized = true; bool ok = Traits::inplace_decomposition(m_matrix); m_info = ok ? Success : NumericalIssue; return *this; } /** Performs a rank one update (or dowdate) of the current decomposition. * If A = LL^* before the rank one update, * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector * of same dimension. */ template<typename _MatrixType, int _UpLo> template<typename VectorType> LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType); eigen_assert(v.size()==m_matrix.cols()); eigen_assert(m_isInitialized); if(internal::llt_inplace<typename MatrixType::Scalar, UpLo>::rankUpdate(m_matrix,v,sigma)>=0) m_info = NumericalIssue; else m_info = Success; return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename _MatrixType,int _UpLo> template<typename RhsType, typename DstType> void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const { dst = rhs; solveInPlace(dst); } #endif /** \internal use x = llt_object.solve(x); * * This is the \em in-place version of solve(). * * \param bAndX represents both the right-hand side matrix b and result x. * * This version avoids a copy when the right hand side matrix b is not needed anymore. 
* * \sa LLT::solve(), MatrixBase::llt() */ template<typename MatrixType, int _UpLo> template<typename Derived> void LLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const { eigen_assert(m_isInitialized && "LLT is not initialized."); eigen_assert(m_matrix.rows()==bAndX.rows()); matrixL().solveInPlace(bAndX); matrixU().solveInPlace(bAndX); } /** \returns the matrix represented by the decomposition, * i.e., it returns the product: L L^*. * This function is provided for debug purpose. */ template<typename MatrixType, int _UpLo> MatrixType LLT<MatrixType,_UpLo>::reconstructedMatrix() const { eigen_assert(m_isInitialized && "LLT is not initialized."); return matrixL() * matrixL().adjoint().toDenseMatrix(); } /** \cholesky_module * \returns the LLT decomposition of \c *this * \sa SelfAdjointView::llt() */ template<typename Derived> inline const LLT<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::llt() const { return LLT<PlainObject>(derived()); } /** \cholesky_module * \returns the LLT decomposition of \c *this * \sa SelfAdjointView::llt() */ template<typename MatrixType, unsigned int UpLo> inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo> SelfAdjointView<MatrixType, UpLo>::llt() const { return LLT<PlainObject,UpLo>(m_matrix); } } // end namespace Eigen #endif // EIGEN_LLT_H
17,834
32.336449
138
h
abess
abess-master/python/include/Eigen/src/Cholesky/LLT_LAPACKE.h
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to LAPACKe * LLt decomposition based on LAPACKE_?potrf function. 
******************************************************************************** */ #ifndef EIGEN_LLT_LAPACKE_H #define EIGEN_LLT_LAPACKE_H namespace Eigen { namespace internal { template<typename Scalar> struct lapacke_llt; #define EIGEN_LAPACKE_LLT(EIGTYPE, BLASTYPE, LAPACKE_PREFIX) \ template<> struct lapacke_llt<EIGTYPE> \ { \ template<typename MatrixType> \ static inline Index potrf(MatrixType& m, char uplo) \ { \ lapack_int matrix_order; \ lapack_int size, lda, info, StorageOrder; \ EIGTYPE* a; \ eigen_assert(m.rows()==m.cols()); \ /* Set up parameters for ?potrf */ \ size = convert_index<lapack_int>(m.rows()); \ StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \ matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ a = &(m.coeffRef(0,0)); \ lda = convert_index<lapack_int>(m.outerStride()); \ \ info = LAPACKE_##LAPACKE_PREFIX##potrf( matrix_order, uplo, size, (BLASTYPE*)a, lda ); \ info = (info==0) ? -1 : info>0 ? info-1 : size; \ return info; \ } \ }; \ template<> struct llt_inplace<EIGTYPE, Lower> \ { \ template<typename MatrixType> \ static Index blocked(MatrixType& m) \ { \ return lapacke_llt<EIGTYPE>::potrf(m, 'L'); \ } \ template<typename MatrixType, typename VectorType> \ static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ }; \ template<> struct llt_inplace<EIGTYPE, Upper> \ { \ template<typename MatrixType> \ static Index blocked(MatrixType& m) \ { \ return lapacke_llt<EIGTYPE>::potrf(m, 'U'); \ } \ template<typename MatrixType, typename VectorType> \ static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ { \ Transpose<MatrixType> matt(mat); \ return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \ } \ }; EIGEN_LAPACKE_LLT(double, double, d) EIGEN_LAPACKE_LLT(float, float, s) EIGEN_LAPACKE_LLT(dcomplex, 
lapack_complex_double, z) EIGEN_LAPACKE_LLT(scomplex, lapack_complex_float, c) } // end namespace internal } // end namespace Eigen #endif // EIGEN_LLT_LAPACKE_H
3,974
38.75
113
h
abess
abess-master/python/include/Eigen/src/CholmodSupport/CholmodSupport.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CHOLMODSUPPORT_H #define EIGEN_CHOLMODSUPPORT_H namespace Eigen { namespace internal { template<typename Scalar> struct cholmod_configure_matrix; template<> struct cholmod_configure_matrix<double> { template<typename CholmodType> static void run(CholmodType& mat) { mat.xtype = CHOLMOD_REAL; mat.dtype = CHOLMOD_DOUBLE; } }; template<> struct cholmod_configure_matrix<std::complex<double> > { template<typename CholmodType> static void run(CholmodType& mat) { mat.xtype = CHOLMOD_COMPLEX; mat.dtype = CHOLMOD_DOUBLE; } }; // Other scalar types are not yet suppotred by Cholmod // template<> struct cholmod_configure_matrix<float> { // template<typename CholmodType> // static void run(CholmodType& mat) { // mat.xtype = CHOLMOD_REAL; // mat.dtype = CHOLMOD_SINGLE; // } // }; // // template<> struct cholmod_configure_matrix<std::complex<float> > { // template<typename CholmodType> // static void run(CholmodType& mat) { // mat.xtype = CHOLMOD_COMPLEX; // mat.dtype = CHOLMOD_SINGLE; // } // }; } // namespace internal /** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. * Note that the data are shared. 
*/ template<typename _Scalar, int _Options, typename _StorageIndex> cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> > mat) { cholmod_sparse res; res.nzmax = mat.nonZeros(); res.nrow = mat.rows(); res.ncol = mat.cols(); res.p = mat.outerIndexPtr(); res.i = mat.innerIndexPtr(); res.x = mat.valuePtr(); res.z = 0; res.sorted = 1; if(mat.isCompressed()) { res.packed = 1; res.nz = 0; } else { res.packed = 0; res.nz = mat.innerNonZeroPtr(); } res.dtype = 0; res.stype = -1; if (internal::is_same<_StorageIndex,int>::value) { res.itype = CHOLMOD_INT; } else if (internal::is_same<_StorageIndex,long>::value) { res.itype = CHOLMOD_LONG; } else { eigen_assert(false && "Index type not supported yet"); } // setup res.xtype internal::cholmod_configure_matrix<_Scalar>::run(res); res.stype = 0; return res; } template<typename _Scalar, int _Options, typename _Index> const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat) { cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.const_cast_derived())); return res; } template<typename _Scalar, int _Options, typename _Index> const cholmod_sparse viewAsCholmod(const SparseVector<_Scalar,_Options,_Index>& mat) { cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.const_cast_derived())); return res; } /** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix. * The data are not copied but shared. */ template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo> cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<const SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat) { cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.matrix().const_cast_derived())); if(UpLo==Upper) res.stype = 1; if(UpLo==Lower) res.stype = -1; return res; } /** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix. * The data are not copied but shared. 
*/ template<typename Derived> cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat) { EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); typedef typename Derived::Scalar Scalar; cholmod_dense res; res.nrow = mat.rows(); res.ncol = mat.cols(); res.nzmax = res.nrow * res.ncol; res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride(); res.x = (void*)(mat.derived().data()); res.z = 0; internal::cholmod_configure_matrix<Scalar>::run(res); return res; } /** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. * The data are not copied but shared. */ template<typename Scalar, int Flags, typename StorageIndex> MappedSparseMatrix<Scalar,Flags,StorageIndex> viewAsEigen(cholmod_sparse& cm) { return MappedSparseMatrix<Scalar,Flags,StorageIndex> (cm.nrow, cm.ncol, static_cast<StorageIndex*>(cm.p)[cm.ncol], static_cast<StorageIndex*>(cm.p), static_cast<StorageIndex*>(cm.i),static_cast<Scalar*>(cm.x) ); } enum CholmodMode { CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt }; /** \ingroup CholmodSupport_Module * \class CholmodBase * \brief The base class for the direct Cholesky factorization of Cholmod * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT */ template<typename _MatrixType, int _UpLo, typename Derived> class CholmodBase : public SparseSolverBase<Derived> { protected: typedef SparseSolverBase<Derived> Base; using Base::derived; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; enum { UpLo = _UpLo }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef MatrixType CholMatrixType; typedef typename MatrixType::StorageIndex StorageIndex; enum { ColsAtCompileTime = MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; public: CholmodBase() : m_cholmodFactor(0), m_info(Success), 
m_factorizationIsOk(false), m_analysisIsOk(false) { EIGEN_STATIC_ASSERT((internal::is_same<double,RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY); m_shiftOffset[0] = m_shiftOffset[1] = 0.0; cholmod_start(&m_cholmod); } explicit CholmodBase(const MatrixType& matrix) : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false) { EIGEN_STATIC_ASSERT((internal::is_same<double,RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY); m_shiftOffset[0] = m_shiftOffset[1] = 0.0; cholmod_start(&m_cholmod); compute(matrix); } ~CholmodBase() { if(m_cholmodFactor) cholmod_free_factor(&m_cholmodFactor, &m_cholmod); cholmod_finish(&m_cholmod); } inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); } inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "Decomposition is not initialized."); return m_info; } /** Computes the sparse Cholesky decomposition of \a matrix */ Derived& compute(const MatrixType& matrix) { analyzePattern(matrix); factorize(matrix); return derived(); } /** Performs a symbolic decomposition on the sparsity pattern of \a matrix. * * This function is particularly useful when solving for several problems having the same structure. 
* * \sa factorize() */ void analyzePattern(const MatrixType& matrix) { if(m_cholmodFactor) { cholmod_free_factor(&m_cholmodFactor, &m_cholmod); m_cholmodFactor = 0; } cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>()); m_cholmodFactor = cholmod_analyze(&A, &m_cholmod); this->m_isInitialized = true; this->m_info = Success; m_analysisIsOk = true; m_factorizationIsOk = false; } /** Performs a numeric decomposition of \a matrix * * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed. * * \sa analyzePattern() */ void factorize(const MatrixType& matrix) { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>()); cholmod_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, &m_cholmod); // If the factorization failed, minor is the column at which it did. On success minor == n. this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue); m_factorizationIsOk = true; } /** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations. * See the Cholmod user guide for details. */ cholmod_common& cholmod() { return m_cholmod; } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal */ template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); const Index size = m_cholmodFactor->n; EIGEN_UNUSED_VARIABLE(size); eigen_assert(size==b.rows()); // Cholmod needs column-major stoarge without inner-stride, which corresponds to the default behavior of Ref. 
Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b.derived()); cholmod_dense b_cd = viewAsCholmod(b_ref); cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod); if(!x_cd) { this->m_info = NumericalIssue; return; } // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols()); cholmod_free_dense(&x_cd, &m_cholmod); } /** \internal */ template<typename RhsDerived, typename DestDerived> void _solve_impl(const SparseMatrixBase<RhsDerived> &b, SparseMatrixBase<DestDerived> &dest) const { eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); const Index size = m_cholmodFactor->n; EIGEN_UNUSED_VARIABLE(size); eigen_assert(size==b.rows()); // note: cs stands for Cholmod Sparse Ref<SparseMatrix<typename RhsDerived::Scalar,ColMajor,typename RhsDerived::StorageIndex> > b_ref(b.const_cast_derived()); cholmod_sparse b_cs = viewAsCholmod(b_ref); cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod); if(!x_cs) { this->m_info = NumericalIssue; return; } // TODO optimize this copy by swapping when possible (be careful with alignment, etc.) dest.derived() = viewAsEigen<typename DestDerived::Scalar,ColMajor,typename DestDerived::StorageIndex>(*x_cs); cholmod_free_sparse(&x_cs, &m_cholmod); } #endif // EIGEN_PARSED_BY_DOXYGEN /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization. * * During the numerical factorization, an offset term is added to the diagonal coefficients:\n * \c d_ii = \a offset + \c d_ii * * The default is \a offset=0. * * \returns a reference to \c *this. 
*/ Derived& setShift(const RealScalar& offset) { m_shiftOffset[0] = double(offset); return derived(); } /** \returns the determinant of the underlying matrix from the current factorization */ Scalar determinant() const { using std::exp; return exp(logDeterminant()); } /** \returns the log determinant of the underlying matrix from the current factorization */ Scalar logDeterminant() const { using std::log; using numext::real; eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); RealScalar logDet = 0; Scalar *x = static_cast<Scalar*>(m_cholmodFactor->x); if (m_cholmodFactor->is_super) { // Supernodal factorization stored as a packed list of dense column-major blocs, // as described by the following structure: // super[k] == index of the first column of the j-th super node StorageIndex *super = static_cast<StorageIndex*>(m_cholmodFactor->super); // pi[k] == offset to the description of row indices StorageIndex *pi = static_cast<StorageIndex*>(m_cholmodFactor->pi); // px[k] == offset to the respective dense block StorageIndex *px = static_cast<StorageIndex*>(m_cholmodFactor->px); Index nb_super_nodes = m_cholmodFactor->nsuper; for (Index k=0; k < nb_super_nodes; ++k) { StorageIndex ncols = super[k + 1] - super[k]; StorageIndex nrows = pi[k + 1] - pi[k]; Map<const Array<Scalar,1,Dynamic>, 0, InnerStride<> > sk(x + px[k], ncols, InnerStride<>(nrows+1)); logDet += sk.real().log().sum(); } } else { // Simplicial factorization stored as standard CSC matrix. 
StorageIndex *p = static_cast<StorageIndex*>(m_cholmodFactor->p); Index size = m_cholmodFactor->n; for (Index k=0; k<size; ++k) logDet += log(real( x[p[k]] )); } if (m_cholmodFactor->is_ll) logDet *= 2.0; return logDet; }; template<typename Stream> void dumpMemory(Stream& /*s*/) {} protected: mutable cholmod_common m_cholmod; cholmod_factor* m_cholmodFactor; double m_shiftOffset[2]; mutable ComputationInfo m_info; int m_factorizationIsOk; int m_analysisIsOk; }; /** \ingroup CholmodSupport_Module * \class CholmodSimplicialLLT * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization * using the Cholmod library. * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLLT */ template<typename _MatrixType, int _UpLo = Lower> class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> > { typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base; using Base::m_cholmod; public: typedef _MatrixType MatrixType; CholmodSimplicialLLT() : Base() { init(); } CholmodSimplicialLLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSimplicialLLT() {} protected: void init() { m_cholmod.final_asis = 0; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; m_cholmod.final_ll = 1; } }; /** \ingroup CholmodSupport_Module * \class CholmodSimplicialLDLT * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization * using the Cholmod library. * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Therefore, it has little practical interest. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLDLT */ template<typename _MatrixType, int _UpLo = Lower> class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> > { typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base; using Base::m_cholmod; public: typedef _MatrixType MatrixType; CholmodSimplicialLDLT() : Base() { init(); } CholmodSimplicialLDLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSimplicialLDLT() {} protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; } }; /** \ingroup CholmodSupport_Module * \class CholmodSupernodalLLT * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization * using the Cholmod library. * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM. * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept */ template<typename _MatrixType, int _UpLo = Lower> class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> > { typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base; using Base::m_cholmod; public: typedef _MatrixType MatrixType; CholmodSupernodalLLT() : Base() { init(); } CholmodSupernodalLLT(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodSupernodalLLT() {} protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SUPERNODAL; } }; /** \ingroup CholmodSupport_Module * \class CholmodDecomposition * \brief A general Cholesky factorization and solver based on Cholmod * * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices * X and B can be either dense or sparse. * * This variant permits to change the underlying Cholesky method at runtime. * On the other hand, it does not provide access to the result of the factorization. * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization. * * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * or Upper. Default is Lower. * * \implsparsesolverconcept * * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * * \warning Only double precision real and complex scalar types are supported by Cholmod. 
* * \sa \ref TutorialSparseSolverConcept */ template<typename _MatrixType, int _UpLo = Lower> class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> > { typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base; using Base::m_cholmod; public: typedef _MatrixType MatrixType; CholmodDecomposition() : Base() { init(); } CholmodDecomposition(const MatrixType& matrix) : Base() { init(); this->compute(matrix); } ~CholmodDecomposition() {} void setMode(CholmodMode mode) { switch(mode) { case CholmodAuto: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_AUTO; break; case CholmodSimplicialLLt: m_cholmod.final_asis = 0; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; m_cholmod.final_ll = 1; break; case CholmodSupernodalLLt: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SUPERNODAL; break; case CholmodLDLt: m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; break; default: break; } } protected: void init() { m_cholmod.final_asis = 1; m_cholmod.supernodal = CHOLMOD_AUTO; } }; } // end namespace Eigen #endif // EIGEN_CHOLMODSUPPORT_H
22,307
33.85625
161
h
abess
abess-master/python/include/Eigen/src/Core/Array.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAY_H #define EIGEN_ARRAY_H namespace Eigen { namespace internal { template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols> struct traits<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > { typedef ArrayXpr XprKind; typedef ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > XprBase; }; } /** \class Array * \ingroup Core_Module * * \brief General-purpose arrays with easy API for coefficient-wise operations * * The %Array class is very similar to the Matrix class. It provides * general-purpose one- and two-dimensional arrays. The difference between the * %Array and the %Matrix class is primarily in the API: the API for the * %Array class provides easy access to coefficient-wise operations, while the * API for the %Matrix class provides easy access to linear-algebra * operations. * * See documentation of class Matrix for detailed information on the template parameters * storage layout. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN. 
* * \sa \blank \ref TutorialArrayClass, \ref TopicClassHierarchy */ template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols> class Array : public PlainObjectBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > { public: typedef PlainObjectBase<Array> Base; EIGEN_DENSE_PUBLIC_INTERFACE(Array) enum { Options = _Options }; typedef typename Base::PlainObject PlainObject; protected: template <typename Derived, typename OtherDerived, bool IsVector> friend struct internal::conservative_resize_like_impl; using Base::m_storage; public: using Base::base; using Base::coeff; using Base::coeffRef; /** * The usage of * using Base::operator=; * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped * the usage of 'using'. This should be done only for operator=. */ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived> &other) { return Base::operator=(other); } /** Set all the entries to \a value. * \sa DenseBase::setConstant(), DenseBase::fill() */ /* This overload is needed because the usage of * using Base::operator=; * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped * the usage of 'using'. This should be done only for operator=. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Scalar &value) { Base::setConstant(value); return *this; } /** Copies the value of the expression \a other into \c *this with automatic resizing. * * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), * it will be initialized. * * Note that copying a row-vector into a vector (and conversely) is allowed. * The resizing, if any, is then done in the appropriate way so that row-vectors * remain row-vectors and vectors remain vectors. 
*/ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const DenseBase<OtherDerived>& other) { return Base::_set(other); } /** This is a special case of the templated operator=. Its purpose is to * prevent a default operator= from hiding the templated operator=. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array& operator=(const Array& other) { return Base::_set(other); } /** Default constructor. * * For fixed-size matrices, does nothing. * * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix * is called a null matrix. This constructor is the unique way to create null matrices: resizing * a matrix to 0 is not supported. * * \sa resize(Index,Index) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array() : Base() { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #ifndef EIGEN_PARSED_BY_DOXYGEN // FIXME is it still needed ?? /** \internal */ EIGEN_DEVICE_FUNC Array(internal::constructor_without_unaligned_array_assert) : Base(internal::constructor_without_unaligned_array_assert()) { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED } #endif #if EIGEN_HAS_RVALUE_REFERENCES EIGEN_DEVICE_FUNC Array(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value) : Base(std::move(other)) { Base::_check_template_params(); if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic) Base::_set_noalias(other); } EIGEN_DEVICE_FUNC Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value) { other.swap(*this); return *this; } #endif #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(const T& x) { Base::_check_template_params(); Base::template _init1<T>(x); } template<typename T0, typename T1> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1) { Base::_check_template_params(); this->template 
_init2<T0,T1>(val0, val1); } #else /** \brief Constructs a fixed-sized array initialized with coefficients starting at \a data */ EIGEN_DEVICE_FUNC explicit Array(const Scalar *data); /** Constructs a vector or row-vector with given dimension. \only_for_vectors * * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, * it is redundant to pass the dimension here, so it makes more sense to use the default * constructor Array() instead. */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Array(Index dim); /** constructs an initialized 1x1 Array with the given coefficient */ Array(const Scalar& value); /** constructs an uninitialized array with \a rows rows and \a cols columns. * * This is useful for dynamic-size arrays. For fixed-size arrays, * it is redundant to pass these parameters, so one should use the default constructor * Array() instead. */ Array(Index rows, Index cols); /** constructs an initialized 2D vector with given coefficients */ Array(const Scalar& val0, const Scalar& val1); #endif /** constructs an initialized 3D vector with given coefficients */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3) m_storage.data()[0] = val0; m_storage.data()[1] = val1; m_storage.data()[2] = val2; } /** constructs an initialized 4D vector with given coefficients */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3) { Base::_check_template_params(); EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4) m_storage.data()[0] = val0; m_storage.data()[1] = val1; m_storage.data()[2] = val2; m_storage.data()[3] = val3; } /** Copy constructor */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const Array& other) : Base(other) { } private: struct PrivateType {}; public: /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */ template<typename 
OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other, typename internal::enable_if<internal::is_convertible<typename OtherDerived::Scalar,Scalar>::value, PrivateType>::type = PrivateType()) : Base(other.derived()) { } EIGEN_DEVICE_FUNC inline Index innerStride() const { return 1; } EIGEN_DEVICE_FUNC inline Index outerStride() const { return this->innerSize(); } #ifdef EIGEN_ARRAY_PLUGIN #include EIGEN_ARRAY_PLUGIN #endif private: template<typename MatrixType, typename OtherDerived, bool SwapPointers> friend struct internal::matrix_swap_impl; }; /** \defgroup arraytypedefs Global array typedefs * \ingroup Core_Module * * Eigen defines several typedef shortcuts for most common 1D and 2D array types. * * The general patterns are the following: * * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd * for complex double. * * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size matrix of floats. * * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is * a fixed-size 1D array of 4 complex floats. 
* * \sa class Array */ #define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ /** \ingroup arraytypedefs */ \ typedef Array<Type, Size, Size> Array##SizeSuffix##SizeSuffix##TypeSuffix; \ /** \ingroup arraytypedefs */ \ typedef Array<Type, Size, 1> Array##SizeSuffix##TypeSuffix; #define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ /** \ingroup arraytypedefs */ \ typedef Array<Type, Size, Dynamic> Array##Size##X##TypeSuffix; \ /** \ingroup arraytypedefs */ \ typedef Array<Type, Dynamic, Size> Array##X##Size##TypeSuffix; #define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \ EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<float>, cf) EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd) #undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES #undef EIGEN_MAKE_ARRAY_TYPEDEFS #undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE #define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ using Eigen::Matrix##SizeSuffix##TypeSuffix; \ using Eigen::Vector##SizeSuffix##TypeSuffix; \ using Eigen::RowVector##SizeSuffix##TypeSuffix; #define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ #define EIGEN_USING_ARRAY_TYPEDEFS \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \ 
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \ EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd) } // end namespace Eigen #endif // EIGEN_ARRAY_H
12,218
35.804217
145
h
abess
abess-master/python/include/Eigen/src/Core/ArrayBase.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAYBASE_H #define EIGEN_ARRAYBASE_H namespace Eigen { template<typename ExpressionType> class MatrixWrapper; /** \class ArrayBase * \ingroup Core_Module * * \brief Base class for all 1D and 2D array, and related expressions * * An array is similar to a dense vector or matrix. While matrices are mathematical * objects with well defined linear algebra operators, an array is just a collection * of scalar values arranged in a one or two dimensionnal fashion. As the main consequence, * all operations applied to an array are performed coefficient wise. Furthermore, * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient * constructors allowing to easily write generic code working for both scalar values * and arrays. * * This class is the base that is inherited by all array expression types. * * \tparam Derived is the derived type, e.g., an array or an expression type. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN. * * \sa class MatrixBase, \ref TopicClassHierarchy */ template<typename Derived> class ArrayBase : public DenseBase<Derived> { public: #ifndef EIGEN_PARSED_BY_DOXYGEN /** The base class for a given storage type. 
*/ typedef ArrayBase StorageBaseType; typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; typedef typename internal::traits<Derived>::StorageKind StorageKind; typedef typename internal::traits<Derived>::Scalar Scalar; typedef typename internal::packet_traits<Scalar>::type PacketScalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef DenseBase<Derived> Base; using Base::RowsAtCompileTime; using Base::ColsAtCompileTime; using Base::SizeAtCompileTime; using Base::MaxRowsAtCompileTime; using Base::MaxColsAtCompileTime; using Base::MaxSizeAtCompileTime; using Base::IsVectorAtCompileTime; using Base::Flags; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::coeff; using Base::coeffRef; using Base::lazyAssign; using Base::operator=; using Base::operator+=; using Base::operator-=; using Base::operator*=; using Base::operator/=; typedef typename Base::CoeffReturnType CoeffReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN typedef typename Base::PlainObject PlainObject; /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN #define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase #define EIGEN_DOC_UNARY_ADDONS(X,Y) # include "../plugins/CommonCwiseUnaryOps.h" # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/ArrayCwiseUnaryOps.h" # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" # include "../plugins/ArrayCwiseBinaryOps.h" # ifdef EIGEN_ARRAYBASE_PLUGIN # include EIGEN_ARRAYBASE_PLUGIN # endif #undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_DOC_UNARY_ADDONS /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE Derived& operator=(const ArrayBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } /** Set all the entries to \a value. * \sa DenseBase::setConstant(), DenseBase::fill() */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Scalar &value) { Base::setConstant(value); return derived(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const Scalar& scalar); EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const Scalar& scalar); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator+=(const ArrayBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator-=(const ArrayBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const ArrayBase<OtherDerived>& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const ArrayBase<OtherDerived>& other); public: EIGEN_DEVICE_FUNC ArrayBase<Derived>& array() { return *this; } EIGEN_DEVICE_FUNC const ArrayBase<Derived>& array() const { return *this; } /** \returns an \link Eigen::MatrixBase Matrix \endlink expression of this array * \sa MatrixBase::array() */ EIGEN_DEVICE_FUNC MatrixWrapper<Derived> matrix() { return MatrixWrapper<Derived>(derived()); } EIGEN_DEVICE_FUNC const MatrixWrapper<const Derived> matrix() const { return MatrixWrapper<const Derived>(derived()); } // template<typename Dest> // inline void evalTo(Dest& dst) const { dst = matrix(); } protected: EIGEN_DEVICE_FUNC ArrayBase() : Base() {} private: explicit ArrayBase(Index); ArrayBase(Index,Index); template<typename OtherDerived> explicit ArrayBase(const ArrayBase<OtherDerived>&); protected: // mixing arrays and matrices is not legal template<typename OtherDerived> Derived& operator+=(const MatrixBase<OtherDerived>& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename 
OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} // mixing arrays and matrices is not legal template<typename OtherDerived> Derived& operator-=(const MatrixBase<OtherDerived>& ) {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} }; /** replaces \c *this by \c *this - \a other. * * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other) { call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } /** replaces \c *this by \c *this + \a other. * * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other) { call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } /** replaces \c *this by \c *this * \a other coefficient wise. * * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other) { call_assignment(derived(), other.derived(), internal::mul_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } /** replaces \c *this by \c *this / \a other coefficient wise. 
* * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived & ArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other) { call_assignment(derived(), other.derived(), internal::div_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } } // end namespace Eigen #endif // EIGEN_ARRAYBASE_H
8,179
35.035242
134
h
abess
abess-master/python/include/Eigen/src/Core/ArrayWrapper.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ARRAYWRAPPER_H #define EIGEN_ARRAYWRAPPER_H namespace Eigen { /** \class ArrayWrapper * \ingroup Core_Module * * \brief Expression of a mathematical vector or matrix as an array object * * This class is the return type of MatrixBase::array(), and most of the time * this is the only way it is use. * * \sa MatrixBase::array(), class MatrixWrapper */ namespace internal { template<typename ExpressionType> struct traits<ArrayWrapper<ExpressionType> > : public traits<typename remove_all<typename ExpressionType::Nested>::type > { typedef ArrayXpr XprKind; // Let's remove NestByRefBit enum { Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags, LvalueBitFlag = is_lvalue<ExpressionType>::value ? 
LvalueBit : 0, Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag }; }; } template<typename ExpressionType> class ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> > { public: typedef ArrayBase<ArrayWrapper> Base; EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper) typedef typename internal::remove_all<ExpressionType>::type NestedExpression; typedef typename internal::conditional< internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType; using Base::coeffRef; EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_expression.coeffRef(rowId, colId); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); } template<typename Dest> EIGEN_DEVICE_FUNC inline void evalTo(Dest& dst) const { dst = m_expression; } const typename internal::remove_all<NestedExpressionType>::type& EIGEN_DEVICE_FUNC nestedExpression() const { return m_expression; } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index) */ EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); } /** Forwards the resizing request to the nested expression * \sa 
DenseBase::resize(Index,Index)*/ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows,cols); } protected: NestedExpressionType m_expression; }; /** \class MatrixWrapper * \ingroup Core_Module * * \brief Expression of an array as a mathematical vector or matrix * * This class is the return type of ArrayBase::matrix(), and most of the time * this is the only way it is use. * * \sa MatrixBase::matrix(), class ArrayWrapper */ namespace internal { template<typename ExpressionType> struct traits<MatrixWrapper<ExpressionType> > : public traits<typename remove_all<typename ExpressionType::Nested>::type > { typedef MatrixXpr XprKind; // Let's remove NestByRefBit enum { Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags, LvalueBitFlag = is_lvalue<ExpressionType>::value ? LvalueBit : 0, Flags = (Flags0 & ~(NestByRefBit | LvalueBit)) | LvalueBitFlag }; }; } template<typename ExpressionType> class MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> > { public: typedef MatrixBase<MatrixWrapper<ExpressionType> > Base; EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper) typedef typename internal::remove_all<ExpressionType>::type NestedExpression; typedef typename internal::conditional< internal::is_lvalue<ExpressionType>::value, Scalar, const Scalar >::type ScalarWithConstIfNotLvalue; typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType; using Base::coeffRef; EIGEN_DEVICE_FUNC explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); } EIGEN_DEVICE_FUNC inline 
ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_expression.data(); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_expression.derived().coeffRef(rowId, colId); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_expression.coeffRef(index); } EIGEN_DEVICE_FUNC const typename internal::remove_all<NestedExpressionType>::type& nestedExpression() const { return m_expression; } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index) */ EIGEN_DEVICE_FUNC void resize(Index newSize) { m_expression.resize(newSize); } /** Forwards the resizing request to the nested expression * \sa DenseBase::resize(Index,Index)*/ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { m_expression.resize(rows,cols); } protected: NestedExpressionType m_expression; }; } // end namespace Eigen #endif // EIGEN_ARRAYWRAPPER_H
6,775
31.266667
97
h
abess
abess-master/python/include/Eigen/src/Core/Assign.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net> // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ASSIGN_H #define EIGEN_ASSIGN_H namespace Eigen { template<typename Derived> template<typename OtherDerived> EIGEN_STRONG_INLINE Derived& DenseBase<Derived> ::lazyAssign(const DenseBase<OtherDerived>& other) { enum{ SameType = internal::is_same<typename Derived::Scalar,typename OtherDerived::Scalar>::value }; EIGEN_STATIC_ASSERT_LVALUE(Derived) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(rows() == other.rows() && cols() == other.cols()); internal::call_assignment_no_alias(derived(),other.derived()); return derived(); } template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> template <typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& 
MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> template <typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other) { internal::call_assignment(derived(), other.derived()); return derived(); } template<typename Derived> template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other) { other.derived().evalTo(derived()); return derived(); } } // end namespace Eigen #endif // EIGEN_ASSIGN_H
2,720
28.901099
145
h
abess
abess-master/python/include/Eigen/src/Core/AssignEvaluator.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ASSIGN_EVALUATOR_H #define EIGEN_ASSIGN_EVALUATOR_H namespace Eigen { // This implementation is based on Assign.h namespace internal { /*************************************************************************** * Part 1 : the logic deciding a strategy for traversal and unrolling * ***************************************************************************/ // copy_using_evaluator_traits is based on assign_traits template <typename DstEvaluator, typename SrcEvaluator, typename AssignFunc> struct copy_using_evaluator_traits { typedef typename DstEvaluator::XprType Dst; typedef typename Dst::Scalar DstScalar; enum { DstFlags = DstEvaluator::Flags, SrcFlags = SrcEvaluator::Flags }; public: enum { DstAlignment = DstEvaluator::Alignment, SrcAlignment = SrcEvaluator::Alignment, DstHasDirectAccess = DstFlags & DirectAccessBit, JointAlignment = EIGEN_PLAIN_ENUM_MIN(DstAlignment,SrcAlignment) }; private: enum { InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) : int(DstFlags)&RowMajorBit ? int(Dst::ColsAtCompileTime) : int(Dst::RowsAtCompileTime), InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) : int(DstFlags)&RowMajorBit ? 
int(Dst::MaxColsAtCompileTime) : int(Dst::MaxRowsAtCompileTime), OuterStride = int(outer_stride_at_compile_time<Dst>::ret), MaxSizeAtCompileTime = Dst::SizeAtCompileTime }; // TODO distinguish between linear traversal and inner-traversals typedef typename find_best_packet<DstScalar,Dst::SizeAtCompileTime>::type LinearPacketType; typedef typename find_best_packet<DstScalar,InnerSize>::type InnerPacketType; enum { LinearPacketSize = unpacket_traits<LinearPacketType>::size, InnerPacketSize = unpacket_traits<InnerPacketType>::size }; public: enum { LinearRequiredAlignment = unpacket_traits<LinearPacketType>::alignment, InnerRequiredAlignment = unpacket_traits<InnerPacketType>::alignment }; private: enum { DstIsRowMajor = DstFlags&RowMajorBit, SrcIsRowMajor = SrcFlags&RowMajorBit, StorageOrdersAgree = (int(DstIsRowMajor) == int(SrcIsRowMajor)), MightVectorize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & ActualPacketAccessBit) && bool(functor_traits<AssignFunc>::PacketAccess), MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(InnerPacketSize)==0 && int(OuterStride)!=Dynamic && int(OuterStride)%int(InnerPacketSize)==0 && (EIGEN_UNALIGNED_VECTORIZE || int(JointAlignment)>=int(InnerRequiredAlignment)), MayLinearize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & LinearAccessBit), MayLinearVectorize = bool(MightVectorize) && MayLinearize && DstHasDirectAccess && (EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment)) || MaxSizeAtCompileTime == Dynamic), /* If the destination isn't aligned, we have to do runtime checks and we don't unroll, so it's only good for large enough sizes. 
*/ MaySliceVectorize = bool(MightVectorize) && bool(DstHasDirectAccess) && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=(EIGEN_UNALIGNED_VECTORIZE?InnerPacketSize:(3*InnerPacketSize))) /* slice vectorization can be slow, so we only want it if the slices are big, which is indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block in a fixed-size matrix However, with EIGEN_UNALIGNED_VECTORIZE and unrolling, slice vectorization is still worth it */ }; public: enum { Traversal = int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize) ? int(LinearVectorizedTraversal) : int(MayInnerVectorize) ? int(InnerVectorizedTraversal) : int(MayLinearVectorize) ? int(LinearVectorizedTraversal) : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) : int(MayLinearize) ? int(LinearTraversal) : int(DefaultTraversal), Vectorized = int(Traversal) == InnerVectorizedTraversal || int(Traversal) == LinearVectorizedTraversal || int(Traversal) == SliceVectorizedTraversal }; typedef typename conditional<int(Traversal)==LinearVectorizedTraversal, LinearPacketType, InnerPacketType>::type PacketType; private: enum { ActualPacketSize = int(Traversal)==LinearVectorizedTraversal ? LinearPacketSize : Vectorized ? InnerPacketSize : 1, UnrollingLimit = EIGEN_UNROLLING_LIMIT * ActualPacketSize, MayUnrollCompletely = int(Dst::SizeAtCompileTime) != Dynamic && int(Dst::SizeAtCompileTime) * (int(DstEvaluator::CoeffReadCost)+int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit), MayUnrollInner = int(InnerSize) != Dynamic && int(InnerSize) * (int(DstEvaluator::CoeffReadCost)+int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit) }; public: enum { Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal)) ? ( int(MayUnrollCompletely) ? int(CompleteUnrolling) : int(MayUnrollInner) ? int(InnerUnrolling) : int(NoUnrolling) ) : int(Traversal) == int(LinearVectorizedTraversal) ? 
( bool(MayUnrollCompletely) && ( EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment))) ? int(CompleteUnrolling) : int(NoUnrolling) ) : int(Traversal) == int(LinearTraversal) ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) ) #if EIGEN_UNALIGNED_VECTORIZE : int(Traversal) == int(SliceVectorizedTraversal) ? ( bool(MayUnrollInner) ? int(InnerUnrolling) : int(NoUnrolling) ) #endif : int(NoUnrolling) }; #ifdef EIGEN_DEBUG_ASSIGN static void debug() { std::cerr << "DstXpr: " << typeid(typename DstEvaluator::XprType).name() << std::endl; std::cerr << "SrcXpr: " << typeid(typename SrcEvaluator::XprType).name() << std::endl; std::cerr.setf(std::ios::hex, std::ios::basefield); std::cerr << "DstFlags" << " = " << DstFlags << " (" << demangle_flags(DstFlags) << " )" << std::endl; std::cerr << "SrcFlags" << " = " << SrcFlags << " (" << demangle_flags(SrcFlags) << " )" << std::endl; std::cerr.unsetf(std::ios::hex); EIGEN_DEBUG_VAR(DstAlignment) EIGEN_DEBUG_VAR(SrcAlignment) EIGEN_DEBUG_VAR(LinearRequiredAlignment) EIGEN_DEBUG_VAR(InnerRequiredAlignment) EIGEN_DEBUG_VAR(JointAlignment) EIGEN_DEBUG_VAR(InnerSize) EIGEN_DEBUG_VAR(InnerMaxSize) EIGEN_DEBUG_VAR(LinearPacketSize) EIGEN_DEBUG_VAR(InnerPacketSize) EIGEN_DEBUG_VAR(ActualPacketSize) EIGEN_DEBUG_VAR(StorageOrdersAgree) EIGEN_DEBUG_VAR(MightVectorize) EIGEN_DEBUG_VAR(MayLinearize) EIGEN_DEBUG_VAR(MayInnerVectorize) EIGEN_DEBUG_VAR(MayLinearVectorize) EIGEN_DEBUG_VAR(MaySliceVectorize) std::cerr << "Traversal" << " = " << Traversal << " (" << demangle_traversal(Traversal) << ")" << std::endl; EIGEN_DEBUG_VAR(SrcEvaluator::CoeffReadCost) EIGEN_DEBUG_VAR(UnrollingLimit) EIGEN_DEBUG_VAR(MayUnrollCompletely) EIGEN_DEBUG_VAR(MayUnrollInner) std::cerr << "Unrolling" << " = " << Unrolling << " (" << demangle_unrolling(Unrolling) << ")" << std::endl; std::cerr << std::endl; } #endif }; /*************************************************************************** * Part 2 : 
meta-unrollers ***************************************************************************/ /************************ *** Default traversal *** ************************/ template<typename Kernel, int Index, int Stop> struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling { // FIXME: this is not very clean, perhaps this information should be provided by the kernel? typedef typename Kernel::DstEvaluatorType DstEvaluatorType; typedef typename DstEvaluatorType::XprType DstXprType; enum { outer = Index / DstXprType::InnerSizeAtCompileTime, inner = Index % DstXprType::InnerSizeAtCompileTime }; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { kernel.assignCoeffByOuterInner(outer, inner); copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, Index+1, Stop>::run(kernel); } }; template<typename Kernel, int Stop> struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, Stop, Stop> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } }; template<typename Kernel, int Index_, int Stop> struct copy_using_evaluator_DefaultTraversal_InnerUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) { kernel.assignCoeffByOuterInner(outer, Index_); copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Index_+1, Stop>::run(kernel, outer); } }; template<typename Kernel, int Stop> struct copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Stop, Stop> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) { } }; /*********************** *** Linear traversal *** ***********************/ template<typename Kernel, int Index, int Stop> struct copy_using_evaluator_LinearTraversal_CompleteUnrolling { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel) { kernel.assignCoeff(Index); copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, Index+1, Stop>::run(kernel); } }; template<typename Kernel, int Stop> struct 
copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, Stop, Stop> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } }; /************************** *** Inner vectorization *** **************************/ template<typename Kernel, int Index, int Stop> struct copy_using_evaluator_innervec_CompleteUnrolling { // FIXME: this is not very clean, perhaps this information should be provided by the kernel? typedef typename Kernel::DstEvaluatorType DstEvaluatorType; typedef typename DstEvaluatorType::XprType DstXprType; typedef typename Kernel::PacketType PacketType; enum { outer = Index / DstXprType::InnerSizeAtCompileTime, inner = Index % DstXprType::InnerSizeAtCompileTime, SrcAlignment = Kernel::AssignmentTraits::SrcAlignment, DstAlignment = Kernel::AssignmentTraits::DstAlignment }; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, inner); enum { NextIndex = Index + unpacket_traits<PacketType>::size }; copy_using_evaluator_innervec_CompleteUnrolling<Kernel, NextIndex, Stop>::run(kernel); } }; template<typename Kernel, int Stop> struct copy_using_evaluator_innervec_CompleteUnrolling<Kernel, Stop, Stop> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { } }; template<typename Kernel, int Index_, int Stop, int SrcAlignment, int DstAlignment> struct copy_using_evaluator_innervec_InnerUnrolling { typedef typename Kernel::PacketType PacketType; EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer) { kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, Index_); enum { NextIndex = Index_ + unpacket_traits<PacketType>::size }; copy_using_evaluator_innervec_InnerUnrolling<Kernel, NextIndex, Stop, SrcAlignment, DstAlignment>::run(kernel, outer); } }; template<typename Kernel, int Stop, int SrcAlignment, int DstAlignment> struct 
copy_using_evaluator_innervec_InnerUnrolling<Kernel, Stop, Stop, SrcAlignment, DstAlignment> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, Index) { } }; /*************************************************************************** * Part 3 : implementation of all cases ***************************************************************************/ // dense_assignment_loop is based on assign_impl template<typename Kernel, int Traversal = Kernel::AssignmentTraits::Traversal, int Unrolling = Kernel::AssignmentTraits::Unrolling> struct dense_assignment_loop; /************************ *** Default traversal *** ************************/ template<typename Kernel> struct dense_assignment_loop<Kernel, DefaultTraversal, NoUnrolling> { EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE run(Kernel &kernel) { for(Index outer = 0; outer < kernel.outerSize(); ++outer) { for(Index inner = 0; inner < kernel.innerSize(); ++inner) { kernel.assignCoeffByOuterInner(outer, inner); } } } }; template<typename Kernel> struct dense_assignment_loop<Kernel, DefaultTraversal, CompleteUnrolling> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, 0, DstXprType::SizeAtCompileTime>::run(kernel); } }; template<typename Kernel> struct dense_assignment_loop<Kernel, DefaultTraversal, InnerUnrolling> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel) { typedef typename Kernel::DstEvaluatorType::XprType DstXprType; const Index outerSize = kernel.outerSize(); for(Index outer = 0; outer < outerSize; ++outer) copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, 0, DstXprType::InnerSizeAtCompileTime>::run(kernel, outer); } }; /*************************** *** Linear vectorization *** ***************************/ // The goal of unaligned_dense_assignment_loop is simply to factorize the handling // of the non 
// ... (continuation of a comment cut at the chunk boundary): vectorizable beginning and ending parts

// Scalar prologue/epilogue of a linearly vectorized assignment: copies the
// coefficients in [start,end) one by one through the kernel. When IsAligned
// is true there is nothing to copy outside the packet loop, so run() is a no-op.
template <bool IsAligned = false>
struct unaligned_dense_assignment_loop
{
  // if IsAligned = true, then do nothing
  template <typename Kernel>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index, Index) {}
};

template <>
struct unaligned_dense_assignment_loop<false>
{
  // MSVC must not inline this functions. If it does, it fails to optimize the
  // packet access path.
  // FIXME check which version exhibits this issue
#if EIGEN_COMP_MSVC
  template <typename Kernel>
  static EIGEN_DONT_INLINE void run(Kernel &kernel, Index start, Index end)
#else
  template <typename Kernel>
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index start, Index end)
#endif
  {
    // Plain coefficient-wise copy over the half-open index range [start, end).
    for (Index index = start; index < end; ++index)
      kernel.assignCoeff(index);
  }
};

// Linear (1D) vectorized traversal without unrolling:
// scalar head up to the first aligned destination address, then a packet loop,
// then a scalar tail for the remainder that does not fill a whole packet.
template<typename Kernel>
struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, NoUnrolling>
{
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    const Index size = kernel.size();
    typedef typename Kernel::Scalar Scalar;
    typedef typename Kernel::PacketType PacketType;
    enum {
      requestedAlignment = Kernel::AssignmentTraits::LinearRequiredAlignment,
      packetSize = unpacket_traits<PacketType>::size,
      // true iff the destination is already known (at compile time) to satisfy
      // the alignment required for aligned packet stores
      dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment),
      dstAlignment = packet_traits<Scalar>::AlignedOnScalar ? int(requestedAlignment)
                                                            : int(Kernel::AssignmentTraits::DstAlignment),
      srcAlignment = Kernel::AssignmentTraits::JointAlignment
    };
    // First index whose destination address is aligned; 0 if already aligned.
    const Index alignedStart = dstIsAligned ? 0 : internal::first_aligned<requestedAlignment>(kernel.dstDataPtr(), size);
    // End of the packet-sized region (largest multiple of packetSize past alignedStart).
    const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;

    // scalar head
    unaligned_dense_assignment_loop<dstIsAligned!=0>::run(kernel, 0, alignedStart);

    // vectorized middle
    for(Index index = alignedStart; index < alignedEnd; index += packetSize)
      kernel.template assignPacket<dstAlignment, srcAlignment, PacketType>(index);

    // scalar tail
    unaligned_dense_assignment_loop<>::run(kernel, alignedEnd, size);
  }
};

// Linear vectorized traversal, fully unrolled at compile time:
// a packet-unrolled part over the largest packet multiple of the (static) size,
// followed by a coefficient-unrolled part for the remainder.
template<typename Kernel>
struct dense_assignment_loop<Kernel, LinearVectorizedTraversal, CompleteUnrolling>
{
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
    typedef typename Kernel::PacketType PacketType;

    enum { size = DstXprType::SizeAtCompileTime,
           packetSize =unpacket_traits<PacketType>::size,
           alignedSize = (size/packetSize)*packetSize };

    copy_using_evaluator_innervec_CompleteUnrolling<Kernel, 0, alignedSize>::run(kernel);
    copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, alignedSize, size>::run(kernel);
  }
};

/**************************
*** Inner vectorization ***
**************************/

// 2D traversal where each inner dimension is an exact multiple of the packet
// size, so the whole inner loop can be done with packets and no scalar tail.
template<typename Kernel>
struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, NoUnrolling>
{
  typedef typename Kernel::PacketType PacketType;
  enum {
    SrcAlignment = Kernel::AssignmentTraits::SrcAlignment,
    DstAlignment = Kernel::AssignmentTraits::DstAlignment
  };
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    const Index innerSize = kernel.innerSize();
    const Index outerSize = kernel.outerSize();
    const Index packetSize = unpacket_traits<PacketType>::size;
    for(Index outer = 0; outer < outerSize; ++outer)
      for(Index inner = 0; inner < innerSize; inner+=packetSize)
        kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, inner);
  }
};

// Inner-vectorized traversal, fully unrolled over the whole (static) size.
template<typename Kernel>
struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, CompleteUnrolling>
{
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
    copy_using_evaluator_innervec_CompleteUnrolling<Kernel, 0, DstXprType::SizeAtCompileTime>::run(kernel);
  }
};

// Inner-vectorized traversal with only the inner loop unrolled; the outer
// loop runs at runtime.
template<typename Kernel>
struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, InnerUnrolling>
{
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
    typedef typename Kernel::AssignmentTraits Traits;
    const Index outerSize = kernel.outerSize();
    for(Index outer = 0; outer < outerSize; ++outer)
      copy_using_evaluator_innervec_InnerUnrolling<Kernel, 0, DstXprType::InnerSizeAtCompileTime,
                                                   Traits::SrcAlignment, Traits::DstAlignment>::run(kernel, outer);
  }
};

/***********************
*** Linear traversal ***
***********************/

// Non-vectorized 1D traversal: one scalar assignment per coefficient.
template<typename Kernel>
struct dense_assignment_loop<Kernel, LinearTraversal, NoUnrolling>
{
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    const Index size = kernel.size();
    for(Index i = 0; i < size; ++i)
      kernel.assignCoeff(i);
  }
};

// Non-vectorized 1D traversal, fully unrolled over the static size.
template<typename Kernel>
struct dense_assignment_loop<Kernel, LinearTraversal, CompleteUnrolling>
{
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
    copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, 0, DstXprType::SizeAtCompileTime>::run(kernel);
  }
};

/**************************
*** Slice vectorization ***
***************************/

// 2D traversal where the inner size is NOT a packet multiple: each inner slice
// gets a scalar head up to its first aligned index, a packet middle, and a
// scalar tail. alignedStart is updated per outer iteration because the start
// of each column/row shifts by outerStride relative to the alignment grid.
template<typename Kernel>
struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, NoUnrolling>
{
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    typedef typename Kernel::Scalar Scalar;
    typedef typename Kernel::PacketType PacketType;
    enum {
      packetSize = unpacket_traits<PacketType>::size,
      requestedAlignment = int(Kernel::AssignmentTraits::InnerRequiredAlignment),
      alignable = packet_traits<Scalar>::AlignedOnScalar || int(Kernel::AssignmentTraits::DstAlignment)>=sizeof(Scalar),
      dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment),
      dstAlignment = alignable ? int(requestedAlignment)
                               : int(Kernel::AssignmentTraits::DstAlignment)
    };
    const Scalar *dst_ptr = kernel.dstDataPtr();
    if((!bool(dstIsAligned)) && (UIntPtr(dst_ptr) % sizeof(Scalar))>0)
    {
      // the pointer is not aligned on a scalar boundary, so alignment is not possible:
      // fall back to the plain coefficient-wise loop
      return dense_assignment_loop<Kernel,DefaultTraversal,NoUnrolling>::run(kernel);
    }
    const Index packetAlignedMask = packetSize - 1;
    const Index innerSize = kernel.innerSize();
    const Index outerSize = kernel.outerSize();
    // by how much the aligned start index drifts from one outer slice to the next
    const Index alignedStep = alignable ? (packetSize - kernel.outerStride() % packetSize) & packetAlignedMask : 0;
    Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0
                       : internal::first_aligned<requestedAlignment>(dst_ptr, innerSize);

    for(Index outer = 0; outer < outerSize; ++outer)
    {
      const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
      // do the non-vectorizable part of the assignment
      for(Index inner = 0; inner<alignedStart ; ++inner)
        kernel.assignCoeffByOuterInner(outer, inner);

      // do the vectorizable part of the assignment
      for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize)
        kernel.template assignPacketByOuterInner<dstAlignment, Unaligned, PacketType>(outer, inner);

      // do the non-vectorizable part of the assignment
      for(Index inner = alignedEnd; inner<innerSize ; ++inner)
        kernel.assignCoeffByOuterInner(outer, inner);

      // advance the aligned start for the next outer slice (clamped to innerSize)
      alignedStart = numext::mini((alignedStart+alignedStep)%packetSize, innerSize);
    }
  }
};

#if EIGEN_UNALIGNED_VECTORIZE
// Slice-vectorized traversal with the inner dimension unrolled: an unrolled
// packet part over the largest packet multiple of the static inner size, then
// an unrolled scalar remainder, repeated for each outer slice.
template<typename Kernel>
struct dense_assignment_loop<Kernel, SliceVectorizedTraversal, InnerUnrolling>
{
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
  {
    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;
    typedef typename Kernel::PacketType PacketType;

    enum { size = DstXprType::InnerSizeAtCompileTime,
           packetSize =unpacket_traits<PacketType>::size,
           vectorizableSize = (size/packetSize)*packetSize };

    for(Index outer = 0; outer < kernel.outerSize(); ++outer)
    {
      copy_using_evaluator_innervec_InnerUnrolling<Kernel, 0, vectorizableSize, 0, 0>::run(kernel, outer);
      copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, vectorizableSize, size>::run(kernel, outer);
    }
  }
};
#endif

/***************************************************************************
* Part 4 : Generic dense assignment kernel
***************************************************************************/

// This class generalize the assignment of a coefficient (or packet) from one dense evaluator
// to another dense writable evaluator.
// It is parametrized by the two evaluators, and the actual assignment functor.
// This abstraction level permits to keep the evaluation loops as simple and as generic as possible.
// One can customize the assignment using this generic dense_assignment_kernel with different
// functors, or by completely overloading it, by-passing a functor.
// Bundles a destination evaluator, a source evaluator and an assignment
// functor, and exposes the coefficient/packet assignment primitives that the
// dense_assignment_loop specializations above drive. All sizes and strides
// are taken from the destination *expression* (m_dstExpr), not the evaluator.
template<typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version = Specialized>
class generic_dense_assignment_kernel
{
protected:
  typedef typename DstEvaluatorTypeT::XprType DstXprType;
  typedef typename SrcEvaluatorTypeT::XprType SrcXprType;
public:

  typedef DstEvaluatorTypeT DstEvaluatorType;
  typedef SrcEvaluatorTypeT SrcEvaluatorType;
  typedef typename DstEvaluatorType::Scalar Scalar;
  typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor> AssignmentTraits;
  typedef typename AssignmentTraits::PacketType PacketType;

  // Stores references only; the evaluators, functor and destination expression
  // must outlive the kernel (they do: see call_dense_assignment_loop below).
  EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)
    : m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr)
  {
    #ifdef EIGEN_DEBUG_ASSIGN
    AssignmentTraits::debug();
    #endif
  }

  // Geometry forwarded from the destination expression.
  EIGEN_DEVICE_FUNC Index size() const        { return m_dstExpr.size(); }
  EIGEN_DEVICE_FUNC Index innerSize() const   { return m_dstExpr.innerSize(); }
  EIGEN_DEVICE_FUNC Index outerSize() const   { return m_dstExpr.outerSize(); }
  EIGEN_DEVICE_FUNC Index rows() const        { return m_dstExpr.rows(); }
  EIGEN_DEVICE_FUNC Index cols() const        { return m_dstExpr.cols(); }
  EIGEN_DEVICE_FUNC Index outerStride() const { return m_dstExpr.outerStride(); }

  EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() { return m_dst; }
  EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const { return m_src; }

  /// Assign src(row,col) to dst(row,col) through the assignment functor.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index row, Index col)
  {
    m_functor.assignCoeff(m_dst.coeffRef(row,col), m_src.coeff(row,col));
  }

  /// \sa assignCoeff(Index,Index)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index index)
  {
    m_functor.assignCoeff(m_dst.coeffRef(index), m_src.coeff(index));
  }

  /// \sa assignCoeff(Index,Index)
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeffByOuterInner(Index outer, Index inner)
  {
    Index row = rowIndexByOuterInner(outer, inner);
    Index col = colIndexByOuterInner(outer, inner);
    assignCoeff(row, col);
  }

  // Packet-wise variants: load a packet from the source, store it into the
  // destination with the requested load/store alignment modes.
  template<int StoreMode, int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index row, Index col)
  {
    m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(row,col), m_src.template packet<LoadMode,PacketType>(row,col));
  }

  template<int StoreMode, int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index index)
  {
    m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(index), m_src.template packet<LoadMode,PacketType>(index));
  }

  template<int StoreMode, int LoadMode, typename PacketType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner)
  {
    Index row = rowIndexByOuterInner(outer, inner);
    Index col = colIndexByOuterInner(outer, inner);
    assignPacket<StoreMode,LoadMode,PacketType>(row, col);
  }

  // Map (outer,inner) coordinates to a row index, depending on vector-ness and
  // storage order of the destination.
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner)
  {
    typedef typename DstEvaluatorType::ExpressionTraits Traits;
    return int(Traits::RowsAtCompileTime) == 1 ? 0
      : int(Traits::ColsAtCompileTime) == 1 ? inner
      : int(DstEvaluatorType::Flags)&RowMajorBit ? outer
      : inner;
  }

  // Map (outer,inner) coordinates to a column index (mirror of the above).
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner)
  {
    typedef typename DstEvaluatorType::ExpressionTraits Traits;
    return int(Traits::ColsAtCompileTime) == 1 ? 0
      : int(Traits::RowsAtCompileTime) == 1 ? inner
      : int(DstEvaluatorType::Flags)&RowMajorBit ? inner
      : outer;
  }

  // Raw pointer to the destination storage, used by the alignment computations
  // in the vectorized loops.
  EIGEN_DEVICE_FUNC const Scalar* dstDataPtr() const
  {
    return m_dstExpr.data();
  }

protected:
  DstEvaluatorType& m_dst;
  const SrcEvaluatorType& m_src;
  const Functor &m_functor;
  // TODO find a way to avoid the needs of the original expression
  DstXprType& m_dstExpr;
};

/***************************************************************************
* Part 5 : Entry point for dense rectangular assignment
***************************************************************************/

// Generic functor case: resizing is not allowed, only check that the sizes
// already match.
template<typename DstXprType,typename SrcXprType, typename Functor>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void resize_if_allowed(DstXprType &dst, const SrcXprType& src, const Functor &/*func*/)
{
  EIGEN_ONLY_USED_FOR_DEBUG(dst);
  EIGEN_ONLY_USED_FOR_DEBUG(src);
  eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
}

// Plain assignment (operator=) case: the destination may be resized to match
// the source.
template<typename DstXprType,typename SrcXprType, typename T1, typename T2>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void resize_if_allowed(DstXprType &dst, const SrcXprType& src, const internal::assign_op<T1,T2> &/*func*/)
{
  Index dstRows = src.rows();
  Index dstCols = src.cols();
  if(((dst.rows()!=dstRows) || (dst.cols()!=dstCols)))
    dst.resize(dstRows, dstCols);
  eigen_assert(dst.rows() == dstRows && dst.cols() == dstCols);
}

// Entry point: builds the evaluators and the kernel, then dispatches to the
// dense_assignment_loop specialization selected by the kernel's traits.
template<typename DstXprType, typename SrcXprType, typename Functor>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src, const Functor &func)
{
  typedef evaluator<DstXprType> DstEvaluatorType;
  typedef evaluator<SrcXprType> SrcEvaluatorType;

  SrcEvaluatorType srcEvaluator(src);

  // NOTE To properly handle A = (A*A.transpose())/s with A rectangular,
  // we need to resize the destination after the source evaluator has been created.
  resize_if_allowed(dst, src, func);

  DstEvaluatorType dstEvaluator(dst);

  typedef generic_dense_assignment_kernel<DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
  Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());

  dense_assignment_loop<Kernel>::run(kernel);
}

// Convenience overload defaulting to plain assignment (operator=).
template<typename DstXprType, typename SrcXprType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src)
{
  call_dense_assignment_loop(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());
}

/***************************************************************************
* Part 6 : Generic assignment
***************************************************************************/

// Based on the respective shapes of the destination and source,
// the class AssignmentKind determines the kind of assignment mechanism.
// AssignmentKind must define a Kind typedef.
template<typename DstShape, typename SrcShape> struct AssignmentKind;

// Assignment kinds defined in this file:
struct Dense2Dense {};
struct EigenBase2EigenBase {};

// Default: fall back to the evalTo-based mechanism; dense-to-dense uses the
// evaluator-based loops above.
template<typename,typename> struct AssignmentKind { typedef EigenBase2EigenBase Kind; };
template<> struct AssignmentKind<DenseShape,DenseShape> { typedef Dense2Dense Kind; };

// This is the main assignment class
template< typename DstXprType, typename SrcXprType, typename Functor,
          typename Kind = typename AssignmentKind< typename evaluator_traits<DstXprType>::Shape , typename evaluator_traits<SrcXprType>::Shape >::Kind,
          typename EnableIf = void>
struct Assignment;

// The only purpose of this call_assignment() function is to deal with noalias() / "assume-aliasing" and automatic transposition.
// Indeed, I (Gael) think that this concept of "assume-aliasing" was a mistake, and it makes things quite complicated.
// So this intermediate function removes everything related to "assume-aliasing" such that Assignment
// does not have to bother about these annoying details.
// Default-functor entry points: forward to the functor-taking overloads with
// a plain assign_op (i.e. operator=).
template<typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment(Dst& dst, const Src& src)
{
  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
}
template<typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment(const Dst& dst, const Src& src)
{
  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
}

// Deal with "assume-aliasing": when the source expression is assumed to alias
// the destination (e.g. matrix products), evaluate it into a temporary first.
template<typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if< evaluator_assume_aliasing<Src>::value, void*>::type = 0)
{
  typename plain_matrix_type<Src>::type tmp(src);
  call_assignment_no_alias(dst, tmp, func);
}

// No assumed aliasing: assign directly.
template<typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if<!evaluator_assume_aliasing<Src>::value, void*>::type = 0)
{
  call_assignment_no_alias(dst, src, func);
}

// by-pass "assume-aliasing"
// When there is no aliasing, we require that 'dst' has been properly resized
template<typename Dst, template <typename> class StorageBase, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment(NoAlias<Dst,StorageBase>& dst, const Src& src, const Func& func)
{
  call_assignment_no_alias(dst.expression(), src, func);
}

// Handles the automatic transposition of a vector assignment (row-vector dst
// from column-vector src and vice versa), then dispatches to Assignment<>.
template<typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment_no_alias(Dst& dst, const Src& src, const Func& func)
{
  enum {
    // transpose needed iff one side is statically a row vector and the other a
    // column vector, excluding the degenerate 1x1 case
    NeedToTranspose = (    (int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1)
                        || (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)
                      ) && int(Dst::SizeAtCompileTime) != 1
  };

  typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst>::type ActualDstTypeCleaned;
  typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst&>::type ActualDstType;
  ActualDstType actualDst(dst);

  // TODO check whether this is the right place to perform these checks:
  EIGEN_STATIC_ASSERT_LVALUE(Dst)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned,Src)
  EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar);

  Assignment<ActualDstTypeCleaned,Src,Func>::run(actualDst, src, func);
}

// Default-functor overload of the above.
template<typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment_no_alias(Dst& dst, const Src& src)
{
  call_assignment_no_alias(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
}

// Like call_assignment_no_alias but without the automatic-transposition step.
template<typename Dst, typename Src, typename Func>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src, const Func& func)
{
  // TODO check whether this is the right place to perform these checks:
  EIGEN_STATIC_ASSERT_LVALUE(Dst)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src)
  EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename Dst::Scalar,typename Src::Scalar);

  Assignment<Dst,Src,Func>::run(dst, src, func);
}

// Default-functor overload of the above.
template<typename Dst, typename Src>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void call_assignment_no_alias_no_transpose(Dst& dst, const Src& src)
{
  call_assignment_no_alias_no_transpose(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());
}

// forward declaration
template<typename Dst, typename Src> void check_for_aliasing(const Dst &dst, const Src &src);

// Generic Dense to Dense assignment
// Note that the last template argument "Weak" is needed to make it possible to perform
// both partial specialization+SFINAE without ambiguous specialization
template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
struct Assignment<DstXprType, SrcXprType, Functor, Dense2Dense, Weak>
{
  EIGEN_DEVICE_FUNC
  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
  {
#ifndef EIGEN_NO_DEBUG
    // debug-only runtime aliasing check (compiled out with EIGEN_NO_DEBUG)
    internal::check_for_aliasing(dst, src);
#endif

    call_dense_assignment_loop(dst, src, func);
  }
};

// Generic assignment through evalTo.
// TODO: not sure we have to keep that one, but it helps porting current code to new evaluator mechanism.
// Note that the last template argument "Weak" is needed to make it possible to perform
// both partial specialization+SFINAE without ambiguous specialization
template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak>
{
  // Plain assignment: resize the destination if needed, then delegate to the
  // source expression's evalTo().
  EIGEN_DEVICE_FUNC
  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
    src.evalTo(dst);
  }

  // NOTE The following two functions are templated to avoid their instantiation if not needed
  //      This is needed because some expressions support evalTo only and/or have 'void' as scalar type.

  // += : delegate to the source expression's addTo().
  template<typename SrcScalarType>
  EIGEN_DEVICE_FUNC
  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,SrcScalarType> &/*func*/)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
    src.addTo(dst);
  }

  // -= : delegate to the source expression's subTo().
  template<typename SrcScalarType>
  EIGEN_DEVICE_FUNC
  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,SrcScalarType> &/*func*/)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
    src.subTo(dst);
  }
};

} // namespace internal

} // end namespace Eigen

#endif // EIGEN_ASSIGN_EVALUATOR_H
38,120
39.727564
171
h
abess
abess-master/python/include/Eigen/src/Core/Assign_MKL.h
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 ********************************************************************************
 * Content : Eigen bindings to Intel(R) MKL
 *   MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin()
 ********************************************************************************
*/

#ifndef EIGEN_ASSIGN_VML_H
#define EIGEN_ASSIGN_VML_H

namespace Eigen {

namespace internal {

// Compile-time predicate deciding whether a Dst = unary(Src) assignment may be
// routed to an MKL VML batch call: both sides need direct, unit-inner-stride
// access with agreeing storage orders, and the (inner or total) size must be
// dynamic or at least EIGEN_MKL_VML_THRESHOLD. Also selects whether the VML
// call can cover the whole expression at once (LinearTraversal) or must be
// issued once per inner slice (DefaultTraversal).
template<typename Dst, typename Src>
class vml_assign_traits
{
  private:
    enum {
      DstHasDirectAccess = Dst::Flags & DirectAccessBit,
      SrcHasDirectAccess = Src::Flags & DirectAccessBit,
      StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)),
      InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime)
                : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime)
                : int(Dst::RowsAtCompileTime),
      InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)
                   : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)
                   : int(Dst::MaxRowsAtCompileTime),
      MaxSizeAtCompileTime = Dst::SizeAtCompileTime,

      MightEnableVml = StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1,
      MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit),
      VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize,
      LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD
    };
  public:
    enum {
      EnableVml = MightEnableVml && LargeEnough,
      Traversal = MightLinearize ? LinearTraversal : DefaultTraversal
    };
};

#define EIGEN_PP_EXPAND(ARG) ARG

// "LA" ops honor EIGEN_FAST_MATH: high accuracy (VML_HA) by default, low
// accuracy (VML_LA) when fast-math is enabled. "_" ops take no mode argument.
#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)
#define EIGEN_VMLMODE_EXPAND_LA , VML_HA
#else
#define EIGEN_VMLMODE_EXPAND_LA , VML_LA
#endif

#define EIGEN_VMLMODE_EXPAND__

// Function-name prefix: "vm" for the mode-taking variants, "v" otherwise.
#define EIGEN_VMLMODE_PREFIX_LA vm
#define EIGEN_VMLMODE_PREFIX__  v
#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_,VMLMODE)

// Specializes Assignment<> for dst = EIGENOP(src) (enabled via
// vml_assign_traits) so the whole assignment becomes one VML call when
// linearizable, or one VML call per inner slice otherwise.
#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE)                                         \
  template< typename DstXprType, typename SrcXprNested>                                                                       \
  struct Assignment<DstXprType, CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested>, assign_op<EIGENTYPE,EIGENTYPE>, \
                   Dense2Dense, typename enable_if<vml_assign_traits<DstXprType,SrcXprNested>::EnableVml>::type> {            \
    typedef CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested> SrcXprType;                                          \
    static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &/*func*/) {                 \
      eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());                                                     \
      if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal) {                                            \
        VMLOP(dst.size(), (const VMLTYPE*)src.nestedExpression().data(),                                                      \
              (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE) );                                         \
      } else {                                                                                                                \
        const Index outerSize = dst.outerSize();                                                                              \
        for(Index outer = 0; outer < outerSize; ++outer) {                                                                    \
          const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer,0)) :                           \
                                                      &(src.nestedExpression().coeffRef(0, outer));                           \
          EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer));                         \
          VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr,                                                                    \
                 (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE));                                          \
        }                                                                                                                     \
      }                                                                                                                       \
    }                                                                                                                         \
  };                                                                                                                          \

// Expand the unary binding for float/double, complex<float>/complex<double>,
// or all four, using the VML naming scheme ([vm][sdcz]Op).
#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE)                                                       \
  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),s##VMLOP), float, float, VMLMODE)         \
  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),d##VMLOP), double, double, VMLMODE)

#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE)                                                         \
  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),c##VMLOP), scomplex, MKL_Complex8, VMLMODE) \
  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),z##VMLOP), dcomplex, MKL_Complex16, VMLMODE)

#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP, VMLMODE)                                                            \
  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE)                                                             \
  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE)

// Bindings for the supported coefficient-wise unary functions.
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sin,   Sin,   LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(asin,  Asin,  LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sinh,  Sinh,  LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cos,   Cos,   LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(acos,  Acos,  LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(cosh,  Cosh,  LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tan,   Tan,   LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(atan,  Atan,  LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(tanh,  Tanh,  LA)
// EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs,   Abs,    _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(exp,   Exp,   LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log,   Ln,    LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(log10, Log10, LA)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS(sqrt,  Sqrt,  _)

EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr,   _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(arg, Arg,      _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(round, Round,  _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(floor, Floor,  _)
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil,  Ceil,   _)

// Same dispatch as the unary macro, but for dst = pow(src, constant):
// the constant exponent is extracted from the nullary rhs functor and passed
// to the VML Powx routines.
#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE)                                           \
  template< typename DstXprType, typename SrcXprNested, typename Plain>                                                      \
  struct Assignment<DstXprType, CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE,EIGENTYPE>, SrcXprNested,                      \
                    const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>,Plain> >, assign_op<EIGENTYPE,EIGENTYPE>,   \
                    Dense2Dense, typename enable_if<vml_assign_traits<DstXprType,SrcXprNested>::EnableVml>::type> {          \
    typedef CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE,EIGENTYPE>, SrcXprNested,                                          \
                    const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>,Plain> > SrcXprType;                        \
    static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &/*func*/) {                \
      eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());                                                    \
      VMLTYPE exponent = reinterpret_cast<const VMLTYPE&>(src.rhs().functor().m_other);                                      \
      if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal)                                             \
      {                                                                                                                      \
        VMLOP( dst.size(), (const VMLTYPE*)src.lhs().data(), exponent,                                                       \
               (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE) );                                       \
      } else {                                                                                                               \
        const Index outerSize = dst.outerSize();                                                                             \
        for(Index outer = 0; outer < outerSize; ++outer) {                                                                   \
          const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.lhs().coeffRef(outer,0)) :                                       \
                                                      &(src.lhs().coeffRef(0, outer));                                       \
          EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer));                        \
          VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, exponent,                                                         \
                 (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE));                                         \
        }                                                                                                                    \
      }                                                                                                                      \
    }                                                                                                                        \
  };

EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmsPowx, float,    float,         LA)
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdPowx, double,   double,        LA)
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcPowx, scomplex, MKL_Complex8,  LA)
EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzPowx, dcomplex, MKL_Complex16, LA)

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_ASSIGN_VML_H
12,221
68.050847
158
h
abess
abess-master/python/include/Eigen/src/Core/BandMatrix.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BANDMATRIX_H #define EIGEN_BANDMATRIX_H namespace Eigen { namespace internal { template<typename Derived> class BandMatrixBase : public EigenBase<Derived> { public: enum { Flags = internal::traits<Derived>::Flags, CoeffReadCost = internal::traits<Derived>::CoeffReadCost, RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime, ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime, MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime, MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime, Supers = internal::traits<Derived>::Supers, Subs = internal::traits<Derived>::Subs, Options = internal::traits<Derived>::Options }; typedef typename internal::traits<Derived>::Scalar Scalar; typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType; typedef typename DenseMatrixType::StorageIndex StorageIndex; typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType; typedef EigenBase<Derived> Base; protected: enum { DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 
1 + Supers + Subs : Dynamic, SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime) }; public: using Base::derived; using Base::rows; using Base::cols; /** \returns the number of super diagonals */ inline Index supers() const { return derived().supers(); } /** \returns the number of sub diagonals */ inline Index subs() const { return derived().subs(); } /** \returns an expression of the underlying coefficient matrix */ inline const CoefficientsType& coeffs() const { return derived().coeffs(); } /** \returns an expression of the underlying coefficient matrix */ inline CoefficientsType& coeffs() { return derived().coeffs(); } /** \returns a vector expression of the \a i -th column, * only the meaningful part is returned. * \warning the internal storage must be column major. */ inline Block<CoefficientsType,Dynamic,1> col(Index i) { EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); Index start = 0; Index len = coeffs().rows(); if (i<=supers()) { start = supers()-i; len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i))); } else if (i>=rows()-subs()) len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs())); return Block<CoefficientsType,Dynamic,1>(coeffs(), start, i, len, 1); } /** \returns a vector expression of the main diagonal */ inline Block<CoefficientsType,1,SizeAtCompileTime> diagonal() { return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } /** \returns a vector expression of the main diagonal (const version) */ inline const Block<const CoefficientsType,1,SizeAtCompileTime> diagonal() const { return Block<const CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } template<int Index> struct DiagonalIntReturnType { enum { ReturnOpposite = (Options&SelfAdjoint) && (((Index)>0 && Supers==0) || ((Index)<0 && Subs==0)), Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex, 
ActualIndex = ReturnOpposite ? -Index : Index, DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic) ? Dynamic : (ActualIndex<0 ? EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex) : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex)) }; typedef Block<CoefficientsType,1, DiagonalSize> BuildType; typedef typename internal::conditional<Conjugate, CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>,BuildType >, BuildType>::type Type; }; /** \returns a vector expression of the \a N -th sub or super diagonal */ template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal() { return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a N -th sub or super diagonal */ template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const { return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ inline Block<CoefficientsType,1,Dynamic> diagonal(Index i) { eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); return Block<CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i)); } /** \returns a vector expression of the \a i -th sub or super diagonal */ inline const Block<const CoefficientsType,1,Dynamic> diagonal(Index i) const { eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); return Block<const CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i)); } template<typename Dest> inline void evalTo(Dest& dst) const { dst.resize(rows(),cols()); dst.setZero(); dst.diagonal() = diagonal(); for (Index i=1; i<=supers();++i) dst.diagonal(i) = diagonal(i); for (Index i=1; i<=subs();++i) dst.diagonal(-i) = diagonal(-i); } DenseMatrixType toDenseMatrix() const { 
DenseMatrixType res(rows(),cols()); evalTo(res); return res; } protected: inline Index diagonalLength(Index i) const { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); } }; /** * \class BandMatrix * \ingroup Core_Module * * \brief Represents a rectangular matrix with a banded storage * * \tparam _Scalar Numeric type, i.e. float, double, int * \tparam _Rows Number of rows, or \b Dynamic * \tparam _Cols Number of columns, or \b Dynamic * \tparam _Supers Number of super diagonal * \tparam _Subs Number of sub diagonal * \tparam _Options A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint * The former controls \ref TopicStorageOrders "storage order", and defaults to * column-major. The latter controls whether the matrix represents a selfadjoint * matrix in which case either Supers of Subs have to be null. * * \sa class TridiagonalMatrix */ template<typename _Scalar, int _Rows, int _Cols, int _Supers, int _Subs, int _Options> struct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> > { typedef _Scalar Scalar; typedef Dense StorageKind; typedef Eigen::Index StorageIndex; enum { CoeffReadCost = NumTraits<Scalar>::ReadCost, RowsAtCompileTime = _Rows, ColsAtCompileTime = _Cols, MaxRowsAtCompileTime = _Rows, MaxColsAtCompileTime = _Cols, Flags = LvalueBit, Supers = _Supers, Subs = _Subs, Options = _Options, DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 
1 + Supers + Subs : Dynamic }; typedef Matrix<Scalar,DataRowsAtCompileTime,ColsAtCompileTime,Options&RowMajor?RowMajor:ColMajor> CoefficientsType; }; template<typename _Scalar, int Rows, int Cols, int Supers, int Subs, int Options> class BandMatrix : public BandMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> > { public: typedef typename internal::traits<BandMatrix>::Scalar Scalar; typedef typename internal::traits<BandMatrix>::StorageIndex StorageIndex; typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType; explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) : m_coeffs(1+supers+subs,cols), m_rows(rows), m_supers(supers), m_subs(subs) { } /** \returns the number of columns */ inline Index rows() const { return m_rows.value(); } /** \returns the number of rows */ inline Index cols() const { return m_coeffs.cols(); } /** \returns the number of super diagonals */ inline Index supers() const { return m_supers.value(); } /** \returns the number of sub diagonals */ inline Index subs() const { return m_subs.value(); } inline const CoefficientsType& coeffs() const { return m_coeffs; } inline CoefficientsType& coeffs() { return m_coeffs; } protected: CoefficientsType m_coeffs; internal::variable_if_dynamic<Index, Rows> m_rows; internal::variable_if_dynamic<Index, Supers> m_supers; internal::variable_if_dynamic<Index, Subs> m_subs; }; template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options> class BandMatrixWrapper; template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options> struct traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> > { typedef typename _CoefficientsType::Scalar Scalar; typedef typename _CoefficientsType::StorageKind StorageKind; typedef typename _CoefficientsType::StorageIndex StorageIndex; enum { CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost, 
RowsAtCompileTime = _Rows, ColsAtCompileTime = _Cols, MaxRowsAtCompileTime = _Rows, MaxColsAtCompileTime = _Cols, Flags = LvalueBit, Supers = _Supers, Subs = _Subs, Options = _Options, DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic }; typedef _CoefficientsType CoefficientsType; }; template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options> class BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> > { public: typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar; typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType; typedef typename internal::traits<BandMatrixWrapper>::StorageIndex StorageIndex; explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs) : m_coeffs(coeffs), m_rows(rows), m_supers(supers), m_subs(subs) { EIGEN_UNUSED_VARIABLE(cols); //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows()); } /** \returns the number of columns */ inline Index rows() const { return m_rows.value(); } /** \returns the number of rows */ inline Index cols() const { return m_coeffs.cols(); } /** \returns the number of super diagonals */ inline Index supers() const { return m_supers.value(); } /** \returns the number of sub diagonals */ inline Index subs() const { return m_subs.value(); } inline const CoefficientsType& coeffs() const { return m_coeffs; } protected: const CoefficientsType& m_coeffs; internal::variable_if_dynamic<Index, _Rows> m_rows; internal::variable_if_dynamic<Index, _Supers> m_supers; internal::variable_if_dynamic<Index, _Subs> m_subs; }; /** * \class TridiagonalMatrix * \ingroup Core_Module * * \brief Represents a tridiagonal matrix with a compact banded storage * * \tparam Scalar Numeric type, i.e. 
float, double, int * \tparam Size Number of rows and cols, or \b Dynamic * \tparam Options Can be 0 or \b SelfAdjoint * * \sa class BandMatrix */ template<typename Scalar, int Size, int Options> class TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> { typedef BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> Base; typedef typename Base::StorageIndex StorageIndex; public: explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {} inline typename Base::template DiagonalIntReturnType<1>::Type super() { return Base::template diagonal<1>(); } inline const typename Base::template DiagonalIntReturnType<1>::Type super() const { return Base::template diagonal<1>(); } inline typename Base::template DiagonalIntReturnType<-1>::Type sub() { return Base::template diagonal<-1>(); } inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const { return Base::template diagonal<-1>(); } protected: }; struct BandShape {}; template<typename _Scalar, int _Rows, int _Cols, int _Supers, int _Subs, int _Options> struct evaluator_traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> > : public evaluator_traits_base<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> > { typedef BandShape Shape; }; template<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options> struct evaluator_traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> > : public evaluator_traits_base<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> > { typedef BandShape Shape; }; template<> struct AssignmentKind<DenseShape,BandShape> { typedef EigenBase2EigenBase Kind; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_BANDMATRIX_H
13,910
38.29661
145
h
abess
abess-master/python/include/Eigen/src/Core/Block.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BLOCK_H #define EIGEN_BLOCK_H namespace Eigen { namespace internal { template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprType> { typedef typename traits<XprType>::Scalar Scalar; typedef typename traits<XprType>::StorageKind StorageKind; typedef typename traits<XprType>::XprKind XprKind; typedef typename ref_selector<XprType>::type XprTypeNested; typedef typename remove_reference<XprTypeNested>::type _XprTypeNested; enum{ MatrixRows = traits<XprType>::RowsAtCompileTime, MatrixCols = traits<XprType>::ColsAtCompileTime, RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows, ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols, MaxRowsAtCompileTime = BlockRows==0 ? 0 : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) : int(traits<XprType>::MaxRowsAtCompileTime), MaxColsAtCompileTime = BlockCols==0 ? 0 : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) : int(traits<XprType>::MaxColsAtCompileTime), XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0, IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 : XprTypeIsRowMajor, HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = HasSameStorageOrderAsXprType ? 
int(inner_stride_at_compile_time<XprType>::ret) : int(outer_stride_at_compile_time<XprType>::ret), OuterStrideAtCompileTime = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time<XprType>::ret) : int(inner_stride_at_compile_time<XprType>::ret), // FIXME, this traits is rather specialized for dense object and it needs to be cleaned further FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0, FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, Flags = (traits<XprType>::Flags & (DirectAccessBit | (InnerPanel?CompressedAccessBit:0))) | FlagsLvalueBit | FlagsRowMajorBit, // FIXME DirectAccessBit should not be handled by expressions // // Alignment is needed by MapBase's assertions // We can sefely set it to false here. Internal alignment errors will be detected by an eigen_internal_assert in the respective evaluator Alignment = 0 }; }; template<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false, bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class BlockImpl_dense; } // end namespace internal template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind> class BlockImpl; /** \class Block * \ingroup Core_Module * * \brief Expression of a fixed-size or dynamic-size block * * \tparam XprType the type of the expression in which we are taking a block * \tparam BlockRows the number of rows of the block we are taking at compile time (optional) * \tparam BlockCols the number of columns of the block we are taking at compile time (optional) * \tparam InnerPanel is true, if the block maps to a set of rows of a row major matrix or * to set of columns of a column major matrix (optional). The parameter allows to determine * at compile time whether aligned access is possible on the block expression. * * This class represents an expression of either a fixed-size or dynamic-size block. 
It is the return * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and * most of the time this is the only way it is used. * * However, if you want to directly maniputate block expressions, * for instance if you want to write a function returning such an expression, you * will need to use this class. * * Here is an example illustrating the dynamic case: * \include class_Block.cpp * Output: \verbinclude class_Block.out * * \note Even though this expression has dynamic size, in the case where \a XprType * has fixed size, this expression inherits a fixed maximal size which means that evaluating * it does not cause a dynamic memory allocation. * * Here is an example illustrating the fixed-size case: * \include class_FixedBlock.cpp * Output: \verbinclude class_FixedBlock.out * * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock */ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class Block : public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> { typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl; public: //typedef typename Impl::Base Base; typedef Impl Base; EIGEN_GENERIC_PUBLIC_INTERFACE(Block) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) typedef typename internal::remove_all<XprType>::type NestedExpression; /** Column or Row constructor */ EIGEN_DEVICE_FUNC inline Block(XprType& xpr, Index i) : Impl(xpr,i) { eigen_assert( (i>=0) && ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows()) ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols()))); } /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline Block(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) { EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE) 
eigen_assert(startRow >= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows() && startCol >= 0 && BlockCols >= 0 && startCol + BlockCols <= xpr.cols()); } /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline Block(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Impl(xpr, startRow, startCol, blockRows, blockCols) { eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); eigen_assert(startRow >= 0 && blockRows >= 0 && startRow <= xpr.rows() - blockRows && startCol >= 0 && blockCols >= 0 && startCol <= xpr.cols() - blockCols); } }; // The generic default implementation for dense block simplu forward to the internal::BlockImpl_dense // that must be specialized for direct and non-direct access... template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense> : public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> { typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl; typedef typename XprType::StorageIndex StorageIndex; public: typedef Impl Base; EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl) EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {} EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {} EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Impl(xpr, startRow, startCol, blockRows, blockCols) {} }; namespace internal { /** \internal Internal implementation of dense Blocks in the general case. 
*/ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class BlockImpl_dense : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel> >::type { typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType; typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested; public: typedef typename internal::dense_xpr_base<BlockType>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) // class InnerIterator; // FIXME apparently never used /** Column or Row constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index i) : m_xpr(xpr), // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, // all other cases are invalid. // The case a 1x1 matrix seems ambiguous, but the result is the same anyway. m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), m_blockRows(BlockRows==1 ? 1 : xpr.rows()), m_blockCols(BlockCols==1 ? 
1 : xpr.cols()) {} /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(BlockRows), m_blockCols(BlockCols) {} /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols) {} EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); } EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index rowId, Index colId) { EIGEN_STATIC_ASSERT_LVALUE(XprType) return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index rowId, Index colId) const { return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const { return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value()); } EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index) { EIGEN_STATIC_ASSERT_LVALUE(XprType) return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } EIGEN_DEVICE_FUNC inline const Scalar& coeffRef(Index index) const { return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const { return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0)); } template<int LoadMode> inline PacketScalar packet(Index rowId, Index colId) const { return m_xpr.template packet<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value()); } template<int LoadMode> inline void writePacket(Index rowId, Index colId, const PacketScalar& val) { m_xpr.template writePacket<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value(), val); } template<int LoadMode> inline PacketScalar packet(Index index) const { return m_xpr.template packet<Unaligned> (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); } template<int LoadMode> inline void writePacket(Index index, const PacketScalar& val) { m_xpr.template writePacket<Unaligned> (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), val); } #ifdef EIGEN_PARSED_BY_DOXYGEN /** \sa MapBase::data() */ EIGEN_DEVICE_FUNC inline const Scalar* data() const; EIGEN_DEVICE_FUNC inline Index innerStride() const; EIGEN_DEVICE_FUNC inline Index outerStride() const; #endif EIGEN_DEVICE_FUNC const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const { return m_xpr; } EIGEN_DEVICE_FUNC XprType& nestedExpression() { return m_xpr; } EIGEN_DEVICE_FUNC StorageIndex startRow() const { return m_startRow.value(); } EIGEN_DEVICE_FUNC StorageIndex startCol() const { return m_startCol.value(); } protected: XprTypeNested m_xpr; const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow; const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols==1) ? 
0 : Dynamic> m_startCol; const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows; const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols; }; /** \internal Internal implementation of dense Blocks in the direct access case.*/ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true> : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel> > { typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType; typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested; enum { XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0 }; public: typedef MapBase<BlockType> Base; EIGEN_DENSE_PUBLIC_INTERFACE(BlockType) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense) /** Column or Row constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index i) : Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor)) || ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()), BlockRows==1 ? 1 : xpr.rows(), BlockCols==1 ? 1 : xpr.cols()), m_xpr(xpr), m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? 
i : 0) { init(); } /** Fixed-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol) : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)), m_xpr(xpr), m_startRow(startRow), m_startCol(startCol) { init(); } /** Dynamic-size constructor */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols) : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols), m_xpr(xpr), m_startRow(startRow), m_startCol(startCol) { init(); } EIGEN_DEVICE_FUNC const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const { return m_xpr; } EIGEN_DEVICE_FUNC XprType& nestedExpression() { return m_xpr; } /** \sa MapBase::innerStride() */ EIGEN_DEVICE_FUNC inline Index innerStride() const { return internal::traits<BlockType>::HasSameStorageOrderAsXprType ? m_xpr.innerStride() : m_xpr.outerStride(); } /** \sa MapBase::outerStride() */ EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_outerStride; } EIGEN_DEVICE_FUNC StorageIndex startRow() const { return m_startRow.value(); } EIGEN_DEVICE_FUNC StorageIndex startCol() const { return m_startCol.value(); } #ifndef __SUNPRO_CC // FIXME sunstudio is not friendly with the above friend... // META-FIXME there is no 'friend' keyword around here. Is this obsolete? protected: #endif #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal used by allowAligned() */ EIGEN_DEVICE_FUNC inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) : Base(data, blockRows, blockCols), m_xpr(xpr) { init(); } #endif protected: EIGEN_DEVICE_FUNC void init() { m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType ? 
m_xpr.outerStride() : m_xpr.innerStride(); } XprTypeNested m_xpr; const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow; const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol; Index m_outerStride; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_BLOCK_H
18,064
38.878587
161
h
abess
abess-master/python/include/Eigen/src/Core/BooleanRedux.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ALLANDANY_H #define EIGEN_ALLANDANY_H namespace Eigen { namespace internal { template<typename Derived, int UnrollCount> struct all_unroller { typedef typename Derived::ExpressionTraits Traits; enum { col = (UnrollCount-1) / Traits::RowsAtCompileTime, row = (UnrollCount-1) % Traits::RowsAtCompileTime }; static inline bool run(const Derived &mat) { return all_unroller<Derived, UnrollCount-1>::run(mat) && mat.coeff(row, col); } }; template<typename Derived> struct all_unroller<Derived, 0> { static inline bool run(const Derived &/*mat*/) { return true; } }; template<typename Derived> struct all_unroller<Derived, Dynamic> { static inline bool run(const Derived &) { return false; } }; template<typename Derived, int UnrollCount> struct any_unroller { typedef typename Derived::ExpressionTraits Traits; enum { col = (UnrollCount-1) / Traits::RowsAtCompileTime, row = (UnrollCount-1) % Traits::RowsAtCompileTime }; static inline bool run(const Derived &mat) { return any_unroller<Derived, UnrollCount-1>::run(mat) || mat.coeff(row, col); } }; template<typename Derived> struct any_unroller<Derived, 0> { static inline bool run(const Derived & /*mat*/) { return false; } }; template<typename Derived> struct any_unroller<Derived, Dynamic> { static inline bool run(const Derived &) { return false; } }; } // end namespace internal /** \returns true if all coefficients are true * * Example: \include MatrixBase_all.cpp * Output: \verbinclude MatrixBase_all.out * * \sa any(), Cwise::operator<() */ template<typename Derived> inline bool DenseBase<Derived>::all() const { typedef internal::evaluator<Derived> Evaluator; enum 
{ unroll = SizeAtCompileTime != Dynamic && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT }; Evaluator evaluator(derived()); if(unroll) return internal::all_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(evaluator); else { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if (!evaluator.coeff(i, j)) return false; return true; } } /** \returns true if at least one coefficient is true * * \sa all() */ template<typename Derived> inline bool DenseBase<Derived>::any() const { typedef internal::evaluator<Derived> Evaluator; enum { unroll = SizeAtCompileTime != Dynamic && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT }; Evaluator evaluator(derived()); if(unroll) return internal::any_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic>::run(evaluator); else { for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if (evaluator.coeff(i, j)) return true; return false; } } /** \returns the number of coefficients which evaluate to true * * \sa all(), any() */ template<typename Derived> inline Eigen::Index DenseBase<Derived>::count() const { return derived().template cast<bool>().template cast<Index>().sum(); } /** \returns true is \c *this contains at least one Not A Number (NaN). * * \sa allFinite() */ template<typename Derived> inline bool DenseBase<Derived>::hasNaN() const { #if EIGEN_COMP_MSVC || (defined __FAST_MATH__) return derived().array().isNaN().any(); #else return !((derived().array()==derived().array()).all()); #endif } /** \returns true if \c *this contains only finite numbers, i.e., no NaN and no +/-INF values. 
* * \sa hasNaN() */ template<typename Derived> inline bool DenseBase<Derived>::allFinite() const { #if EIGEN_COMP_MSVC || (defined __FAST_MATH__) return derived().array().isFinite().all(); #else return !((derived()-derived()).hasNaN()); #endif } } // end namespace Eigen #endif // EIGEN_ALLANDANY_H
4,249
24.757576
113
h
abess
abess-master/python/include/Eigen/src/Core/CommaInitializer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMMAINITIALIZER_H #define EIGEN_COMMAINITIALIZER_H namespace Eigen { /** \class CommaInitializer * \ingroup Core_Module * * \brief Helper class used by the comma initializer operator * * This class is internally used to implement the comma initializer feature. It is * the return type of MatrixBase::operator<<, and most of the time this is the only * way it is used. * * \sa \blank \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() */ template<typename XprType> struct CommaInitializer { typedef typename XprType::Scalar Scalar; EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const Scalar& s) : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) { m_xpr.coeffRef(0,0) = s; } template<typename OtherDerived> EIGEN_DEVICE_FUNC inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other) : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) { m_xpr.block(0, 0, other.rows(), other.cols()) = other; } /* Copy/Move constructor which transfers ownership. This is crucial in * absence of return value optimization to avoid assertions during destruction. */ // FIXME in C++11 mode this could be replaced by a proper RValue constructor EIGEN_DEVICE_FUNC inline CommaInitializer(const CommaInitializer& o) : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) { // Mark original object as finished. 
In absence of R-value references we need to const_cast: const_cast<CommaInitializer&>(o).m_row = m_xpr.rows(); const_cast<CommaInitializer&>(o).m_col = m_xpr.cols(); const_cast<CommaInitializer&>(o).m_currentBlockRows = 0; } /* inserts a scalar value in the target matrix */ EIGEN_DEVICE_FUNC CommaInitializer& operator,(const Scalar& s) { if (m_col==m_xpr.cols()) { m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = 1; eigen_assert(m_row<m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)"); } eigen_assert(m_col<m_xpr.cols() && "Too many coefficients passed to comma initializer (operator<<)"); eigen_assert(m_currentBlockRows==1); m_xpr.coeffRef(m_row, m_col++) = s; return *this; } /* inserts a matrix expression in the target matrix */ template<typename OtherDerived> EIGEN_DEVICE_FUNC CommaInitializer& operator,(const DenseBase<OtherDerived>& other) { if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows)) { m_row+=m_currentBlockRows; m_col = 0; m_currentBlockRows = other.rows(); eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows() && "Too many rows passed to comma initializer (operator<<)"); } eigen_assert((m_col + other.cols() <= m_xpr.cols()) && "Too many coefficients passed to comma initializer (operator<<)"); eigen_assert(m_currentBlockRows==other.rows()); m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime> (m_row, m_col, other.rows(), other.cols()) = other; m_col += other.cols(); return *this; } EIGEN_DEVICE_FUNC inline ~CommaInitializer() #if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception) #endif { finished(); } /** \returns the built matrix once all its coefficients have been set. * Calling finished is 100% optional. 
Its purpose is to write expressions * like this: * \code * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); * \endcode */ EIGEN_DEVICE_FUNC inline XprType& finished() { eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0) && m_col == m_xpr.cols() && "Too few coefficients passed to comma initializer (operator<<)"); return m_xpr; } XprType& m_xpr; // target expression Index m_row; // current row id Index m_col; // current col id Index m_currentBlockRows; // current block height }; /** \anchor MatrixBaseCommaInitRef * Convenient operator to set the coefficients of a matrix. * * The coefficients must be provided in a row major order and exactly match * the size of the matrix. Otherwise an assertion is raised. * * Example: \include MatrixBase_set.cpp * Output: \verbinclude MatrixBase_set.out * * \note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order. * * \sa CommaInitializer::finished(), class CommaInitializer */ template<typename Derived> inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s) { return CommaInitializer<Derived>(*static_cast<Derived*>(this), s); } /** \sa operator<<(const Scalar&) */ template<typename Derived> template<typename OtherDerived> inline CommaInitializer<Derived> DenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other) { return CommaInitializer<Derived>(*static_cast<Derived *>(this), other); } } // end namespace Eigen #endif // EIGEN_COMMAINITIALIZER_H
5,689
34.341615
122
h
abess
abess-master/python/include/Eigen/src/Core/ConditionEstimator.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com) // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CONDITIONESTIMATOR_H #define EIGEN_CONDITIONESTIMATOR_H namespace Eigen { namespace internal { template <typename Vector, typename RealVector, bool IsComplex> struct rcond_compute_sign { static inline Vector run(const Vector& v) { const RealVector v_abs = v.cwiseAbs(); return (v_abs.array() == static_cast<typename Vector::RealScalar>(0)) .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs)); } }; // Partial specialization to avoid elementwise division for real vectors. template <typename Vector> struct rcond_compute_sign<Vector, Vector, false> { static inline Vector run(const Vector& v) { return (v.array() < static_cast<typename Vector::RealScalar>(0)) .select(-Vector::Ones(v.size()), Vector::Ones(v.size())); } }; /** * \returns an estimate of ||inv(matrix)||_1 given a decomposition of * \a matrix that implements .solve() and .adjoint().solve() methods. * * This function implements Algorithms 4.1 and 5.1 from * http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf * which also forms the basis for the condition number estimators in * LAPACK. Since at most 10 calls to the solve method of dec are * performed, the total cost is O(dims^2), as opposed to O(dims^3) * needed to compute the inverse matrix explicitly. * * The most common usage is in estimating the condition number * ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be * computed directly in O(n^2) operations. * * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and * LLT. * * \sa FullPivLU, PartialPivLU, LDLT, LLT. 
*/ template <typename Decomposition> typename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec) { typedef typename Decomposition::MatrixType MatrixType; typedef typename Decomposition::Scalar Scalar; typedef typename Decomposition::RealScalar RealScalar; typedef typename internal::plain_col_type<MatrixType>::type Vector; typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector; const bool is_complex = (NumTraits<Scalar>::IsComplex != 0); eigen_assert(dec.rows() == dec.cols()); const Index n = dec.rows(); if (n == 0) return 0; // Disable Index to float conversion warning #ifdef __INTEL_COMPILER #pragma warning push #pragma warning ( disable : 2259 ) #endif Vector v = dec.solve(Vector::Ones(n) / Scalar(n)); #ifdef __INTEL_COMPILER #pragma warning pop #endif // lower_bound is a lower bound on // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1 // and is the objective maximized by the ("super-") gradient ascent // algorithm below. RealScalar lower_bound = v.template lpNorm<1>(); if (n == 1) return lower_bound; // Gradient ascent algorithm follows: We know that the optimum is achieved at // one of the simplices v = e_i, so in each iteration we follow a // super-gradient to move towards the optimal one. RealScalar old_lower_bound = lower_bound; Vector sign_vector(n); Vector old_sign_vector; Index v_max_abs_index = -1; Index old_v_max_abs_index = v_max_abs_index; for (int k = 0; k < 4; ++k) { sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v); if (k > 0 && !is_complex && sign_vector == old_sign_vector) { // Break if the solution stagnated. break; } // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )| v = dec.adjoint().solve(sign_vector); v.real().cwiseAbs().maxCoeff(&v_max_abs_index); if (v_max_abs_index == old_v_max_abs_index) { // Break if the solution stagnated. break; } // Move to the new simplex e_j, where j = v_max_abs_index. 
v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j. lower_bound = v.template lpNorm<1>(); if (lower_bound <= old_lower_bound) { // Break if the gradient step did not increase the lower_bound. break; } if (!is_complex) { old_sign_vector = sign_vector; } old_v_max_abs_index = v_max_abs_index; old_lower_bound = lower_bound; } // The following calculates an independent estimate of ||matrix||_1 by // multiplying matrix by a vector with entries of slowly increasing // magnitude and alternating sign: // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1. // This improvement to Hager's algorithm above is due to Higham. It was // added to make the algorithm more robust in certain corner cases where // large elements in the matrix might otherwise escape detection due to // exact cancellation (especially when op and op_adjoint correspond to a // sequence of backsubstitutions and permutations), which could cause // Hager's algorithm to vastly underestimate ||matrix||_1. Scalar alternating_sign(RealScalar(1)); for (Index i = 0; i < n; ++i) { // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1)))); alternating_sign = -alternating_sign; } v = dec.solve(v); const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n)); return numext::maxi(lower_bound, alternate_lower_bound); } /** \brief Reciprocal condition number estimator. * * Computing a decomposition of a dense matrix takes O(n^3) operations, while * this method estimates the condition number quickly and reliably in O(n^2) * operations. * * \returns an estimate of the reciprocal condition number * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and * its decomposition. Supports the following decompositions: FullPivLU, * PartialPivLU, LDLT, and LLT. * * \sa FullPivLU, PartialPivLU, LDLT, LLT. 
*/ template <typename Decomposition> typename Decomposition::RealScalar rcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec) { typedef typename Decomposition::RealScalar RealScalar; eigen_assert(dec.rows() == dec.cols()); if (dec.rows() == 0) return RealScalar(1); if (matrix_norm == RealScalar(0)) return RealScalar(0); if (dec.rows() == 1) return RealScalar(1); const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec); return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0) : (RealScalar(1) / inverse_matrix_norm) / matrix_norm); } } // namespace internal } // namespace Eigen #endif
6,970
38.607955
109
h
abess
abess-master/python/include/Eigen/src/Core/CoreEvaluators.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COREEVALUATORS_H #define EIGEN_COREEVALUATORS_H namespace Eigen { namespace internal { // This class returns the evaluator kind from the expression storage kind. // Default assumes index based accessors template<typename StorageKind> struct storage_kind_to_evaluator_kind { typedef IndexBased Kind; }; // This class returns the evaluator shape from the expression storage kind. // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc. template<typename StorageKind> struct storage_kind_to_shape; template<> struct storage_kind_to_shape<Dense> { typedef DenseShape Shape; }; template<> struct storage_kind_to_shape<SolverStorage> { typedef SolverShape Shape; }; template<> struct storage_kind_to_shape<PermutationStorage> { typedef PermutationShape Shape; }; template<> struct storage_kind_to_shape<TranspositionsStorage> { typedef TranspositionsShape Shape; }; // Evaluators have to be specialized with respect to various criteria such as: // - storage/structure/shape // - scalar type // - etc. // Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators. 
// We currently distinguish the following kind of evaluators: // - unary_evaluator for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate) // - binary_evaluator for expression taking two arguments (CwiseBinaryOp) // - ternary_evaluator for expression taking three arguments (CwiseTernaryOp) // - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching. // - mapbase_evaluator for Map, Block, Ref // - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator) template< typename T, typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind, typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind, typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind, typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar, typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar, typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator; template< typename T, typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind, typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind, typename LhsScalar = typename traits<typename T::Lhs>::Scalar, typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator; template< typename T, typename Kind = typename evaluator_traits<typename T::NestedExpression>::Kind, typename Scalar = typename T::Scalar> struct unary_evaluator; // evaluator_traits<T> contains traits for evaluator<T> template<typename T> struct evaluator_traits_base { // by default, get evaluator kind and shape from storage typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind; typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape; }; // Default evaluator traits template<typename T> struct 
evaluator_traits : public evaluator_traits_base<T> { }; template<typename T, typename Shape = typename evaluator_traits<T>::Shape > struct evaluator_assume_aliasing { static const bool value = false; }; // By default, we assume a unary expression: template<typename T> struct evaluator : public unary_evaluator<T> { typedef unary_evaluator<T> Base; EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {} }; // TODO: Think about const-correctness template<typename T> struct evaluator<const T> : evaluator<T> { EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : evaluator<T>(xpr) {} }; // ---------- base class for all evaluators ---------- template<typename ExpressionType> struct evaluator_base : public noncopyable { // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices. typedef traits<ExpressionType> ExpressionTraits; enum { Alignment = 0 }; }; // -------------------- Matrix and Array -------------------- // // evaluator<PlainObjectBase> is a common base class for the // Matrix and Array evaluators. // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense, // so no need for more sophisticated dispatching. template<typename Derived> struct evaluator<PlainObjectBase<Derived> > : evaluator_base<Derived> { typedef PlainObjectBase<Derived> PlainObjectType; typedef typename PlainObjectType::Scalar Scalar; typedef typename PlainObjectType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = PlainObjectType::IsRowMajor, IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime, RowsAtCompileTime = PlainObjectType::RowsAtCompileTime, ColsAtCompileTime = PlainObjectType::ColsAtCompileTime, CoeffReadCost = NumTraits<Scalar>::ReadCost, Flags = traits<Derived>::EvaluatorFlags, Alignment = traits<Derived>::Alignment }; EIGEN_DEVICE_FUNC evaluator() : m_data(0), m_outerStride(IsVectorAtCompileTime ? 0 : int(IsRowMajor) ? 
ColsAtCompileTime : RowsAtCompileTime) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m) : m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 0 : m.outerStride()) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { if (IsRowMajor) return m_data[row * m_outerStride.value() + col]; else return m_data[row + col * m_outerStride.value()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_data[index]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { if (IsRowMajor) return const_cast<Scalar*>(m_data)[row * m_outerStride.value() + col]; else return const_cast<Scalar*>(m_data)[row + col * m_outerStride.value()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return const_cast<Scalar*>(m_data)[index]; } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { if (IsRowMajor) return ploadt<PacketType, LoadMode>(m_data + row * m_outerStride.value() + col); else return ploadt<PacketType, LoadMode>(m_data + row + col * m_outerStride.value()); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return ploadt<PacketType, LoadMode>(m_data + index); } template<int StoreMode,typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { if (IsRowMajor) return pstoret<Scalar, PacketType, StoreMode> (const_cast<Scalar*>(m_data) + row * m_outerStride.value() + col, x); else return pstoret<Scalar, PacketType, StoreMode> (const_cast<Scalar*>(m_data) + row + col * m_outerStride.value(), x); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_data) + index, 
x); } protected: const Scalar *m_data; // We do not need to know the outer stride for vectors variable_if_dynamic<Index, IsVectorAtCompileTime ? 0 : int(IsRowMajor) ? ColsAtCompileTime : RowsAtCompileTime> m_outerStride; }; template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols> struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > > { typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType; EIGEN_DEVICE_FUNC evaluator() {} EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m) : evaluator<PlainObjectBase<XprType> >(m) { } }; template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols> struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > > { typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType; EIGEN_DEVICE_FUNC evaluator() {} EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m) : evaluator<PlainObjectBase<XprType> >(m) { } }; // -------------------- Transpose -------------------- template<typename ArgType> struct unary_evaluator<Transpose<ArgType>, IndexBased> : evaluator_base<Transpose<ArgType> > { typedef Transpose<ArgType> XprType; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = evaluator<ArgType>::Flags ^ RowMajorBit, Alignment = evaluator<ArgType>::Alignment }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {} typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(col, row); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, 
Index col) { return m_argImpl.coeffRef(col, row); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename XprType::Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet<LoadMode,PacketType>(col, row); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_argImpl.template packet<LoadMode,PacketType>(index); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { m_argImpl.template writePacket<StoreMode,PacketType>(index, x); } protected: evaluator<ArgType> m_argImpl; }; // -------------------- CwiseNullaryOp -------------------- // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator. // Likewise, there is not need to more sophisticated dispatching here. 
// nullary_wrapper dispatches a nullary functor to whichever of its call
// operators actually exists.  The three trailing bool parameters are detected
// through the has_*_operator traits:
//   has_nullary -> op()        (e.g. a constant generator)
//   has_unary   -> op(i)       (e.g. linspaced, 1D addressing)
//   has_binary  -> op(i,j)     (2D addressing)
// The primary template forwards both the 1D and 2D forms unchanged.
template<typename Scalar,typename NullaryOp, bool has_nullary = has_nullary_operator<NullaryOp>::value, bool has_unary = has_unary_operator<NullaryOp>::value, bool has_binary = has_binary_operator<NullaryOp>::value>
struct nullary_wrapper
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }

  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
};

// Functor only provides op(): ignore the indices entirely.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
};

// Functor only provides op(i,j): 1D access is mapped to (i,0) via the
// defaulted second argument.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
};

// We need the following specialization for vector-only functors assigned to a runtime vector,
// for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
// In this case, i==0 and j is used for the actual iteration.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
    // One of the two indices must be zero (vector-only functor); the other
    // one carries the running position, so i+j is the linear index.
    eigen_assert(i==0 || j==0);
    return op(i+j);
  }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
    eigen_assert(i==0 || j==0);
    return op.template packetOp<T>(i+j);
  }

  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
};

// No usable call operator at all: leave the wrapper empty so any use is a
// compile-time error.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};

#if 0 && EIGEN_COMP_MSVC>0
// Disable this ugly workaround. This is now handled in traits<Ref>::match,
// but this piece of code might still become handy if some other weird compilation
// errors pop up again.

// MSVC exhibits a weird compilation error when
// compiling:
//    Eigen::MatrixXf A = MatrixXf::Random(3,3);
//    Ref<const MatrixXf> R = 2.f*A;
// and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.
// The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
// and at that time has_*ary_operator<T> returns true regardless of T.
// Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
// The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
// and packet() are really instantiated as implemented below:

// This is a simple wrapper around Index to enforce the re-instantiation of
// has_*ary_operator when needed.
template<typename T> struct nullary_wrapper_workaround_msvc {
  nullary_wrapper_workaround_msvc(const T&);
  operator T()const;
};

// Catch-all specialization re-running the trait detection with the wrapped
// IndexType so the correct nullary_wrapper is picked at call time.
template<typename Scalar,typename NullaryOp>
struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
{
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
  }
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
  }

  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
  }
  template <typename T, typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
    return nullary_wrapper<Scalar,NullaryOp,
    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
  }
};
#endif // MSVC workaround

// Evaluator for CwiseNullaryOp: every coefficient/packet is produced by the
// stored functor, routed through nullary_wrapper above.
template<typename NullaryOp, typename PlainObjectType>
struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
  : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
{
  typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
  typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;

  enum {
    CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,

    // Linear/packet access is only exposed when the functor supports it;
    // non-repeatable functors (e.g. random) must be evaluated before nesting.
    Flags = (evaluator<PlainObjectTypeCleaned>::Flags
          &  (  HereditaryBits
              | (functor_has_linear_access<NullaryOp>::ret  ? LinearAccessBit : 0)
              | (functor_traits<NullaryOp>::PacketAccess    ? PacketAccessBit : 0)))
          | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
    Alignment = AlignedMax
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
    : m_functor(n.functor()), m_wrapper()
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  // NOTE: coeff/packet are themselves templated on IndexType — presumably to
  // defer instantiation of the wrapper dispatch (see the disabled MSVC
  // workaround above) — TODO confirm.
  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(IndexType row, IndexType col) const
  {
    return m_wrapper(m_functor, row, col);
  }

  template <typename IndexType>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(IndexType index) const
  {
    return m_wrapper(m_functor,index);
  }

  template<int LoadMode, typename PacketType, typename IndexType>
  EIGEN_STRONG_INLINE
  PacketType packet(IndexType row, IndexType col) const
  {
    return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
  }

  template<int LoadMode, typename PacketType, typename IndexType>
  EIGEN_STRONG_INLINE
  PacketType packet(IndexType index) const
  {
    return m_wrapper.template packetOp<PacketType>(m_functor, index);
  }

protected:
  const NullaryOp m_functor;
  const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
};

// -------------------- CwiseUnaryOp --------------------

// Evaluator for CwiseUnaryOp: applies the unary functor to each coefficient
// (or packet) produced by the nested expression's evaluator.
template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
  : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
{
  typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,

    // Packet access additionally requires the functor to support it.
    Flags = evaluator<ArgType>::Flags
          & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit unary_evaluator(const XprType& op)
    : m_functor(op.functor()),
      m_argImpl(op.nestedExpression())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_functor(m_argImpl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_argImpl.coeff(index));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(row, col));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(index));
  }

protected:
  const UnaryOp m_functor;
  evaluator<ArgType> m_argImpl;
};

// -------------------- CwiseTernaryOp --------------------

// this is a ternary expression
// Dispatcher: forwards to ternary_evaluator (defined next), selected on the
// storage kinds of the three operands.
template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
  : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
{
  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
  typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
};
// Evaluator for CwiseTernaryOp over three index-based operands: applies the
// ternary functor coefficient-wise (or packet-wise) to the three arguments.
template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
  : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
{
  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;

  enum {
    CoeffReadCost = evaluator<Arg1>::CoeffReadCost + evaluator<Arg2>::CoeffReadCost + evaluator<Arg3>::CoeffReadCost + functor_traits<TernaryOp>::Cost,

    Arg1Flags = evaluator<Arg1>::Flags,
    Arg2Flags = evaluator<Arg2>::Flags,
    Arg3Flags = evaluator<Arg3>::Flags,
    // Vectorization requires identical scalar types across operands...
    SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
    // ...and matching storage orders, otherwise even linear access is unsafe.
    StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
    Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
        HereditaryBits
      | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
           ( (StorageOrdersAgree ? LinearAccessBit : 0)
           | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
           )
        )
     ),
    // The result adopts Arg1's majorness.
    Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
    // Alignment is bounded by the least-aligned operand.
    Alignment = EIGEN_PLAIN_ENUM_MIN(
        EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
        evaluator<Arg3>::Alignment)
  };

  EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr)
    : m_functor(xpr.functor()),
      m_arg1Impl(xpr.arg1()),
      m_arg2Impl(xpr.arg2()),
      m_arg3Impl(xpr.arg3())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_functor(m_arg1Impl.coeff(row, col), m_arg2Impl.coeff(row, col), m_arg3Impl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(row, col),
                              m_arg2Impl.template packet<LoadMode,PacketType>(row, col),
                              m_arg3Impl.template packet<LoadMode,PacketType>(row, col));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(index),
                              m_arg2Impl.template packet<LoadMode,PacketType>(index),
                              m_arg3Impl.template packet<LoadMode,PacketType>(index));
  }

protected:
  const TernaryOp m_functor;
  evaluator<Arg1> m_arg1Impl;
  evaluator<Arg2> m_arg2Impl;
  evaluator<Arg3> m_arg3Impl;
};

// -------------------- CwiseBinaryOp --------------------

// this is a binary expression
// Dispatcher: forwards to binary_evaluator selected on the operands' storage kinds.
template<typename BinaryOp, typename Lhs, typename Rhs>
struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
 : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
  typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
};

// Evaluator for CwiseBinaryOp over two index-based operands; flag logic
// mirrors the ternary case above.
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;

  enum {
    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,

    LhsFlags = evaluator<Lhs>::Flags,
    RhsFlags = evaluator<Rhs>::Flags,
    SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
    StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
    Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
        HereditaryBits
      | (int(LhsFlags) & int(RhsFlags) &
           ( (StorageOrdersAgree ? LinearAccessBit : 0)
           | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
           )
        )
     ),
    // The result adopts the lhs majorness.
    Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
  };

  EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr)
    : m_functor(xpr.functor()),
      m_lhsImpl(xpr.lhs()),
      m_rhsImpl(xpr.rhs())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(row, col),
                              m_rhsImpl.template packet<LoadMode,PacketType>(row, col));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(index),
                              m_rhsImpl.template packet<LoadMode,PacketType>(index));
  }

protected:
  const BinaryOp m_functor;
  evaluator<Lhs> m_lhsImpl;
  evaluator<Rhs> m_rhsImpl;
};

// -------------------- CwiseUnaryView --------------------

// Evaluator for CwiseUnaryView: like CwiseUnaryOp but writable — the view
// functor returns an lvalue, so coeffRef is provided as well.  No packet
// access (PacketAccessBit is not propagated).
template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
  : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
{
  typedef CwiseUnaryView<UnaryOp, ArgType> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,

    Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),

    Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
  };

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op)
    : m_unaryOp(op.functor()),
      m_argImpl(op.nestedExpression())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_unaryOp(m_argImpl.coeff(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_unaryOp(m_argImpl.coeff(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_unaryOp(m_argImpl.coeffRef(row, col));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_unaryOp(m_argImpl.coeffRef(index));
  }

protected:
  const UnaryOp m_unaryOp;
  evaluator<ArgType> m_argImpl;
};

// -------------------- Map --------------------

// FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
// but that might complicate template specialization
template<typename Derived, typename PlainObjectType>
struct mapbase_evaluator;

// Shared evaluator base for all expressions with direct, strided memory
// access (Map, Ref, direct-access Block): coefficients are addressed through
// a raw pointer plus inner/outer strides.
template<typename Derived, typename PlainObjectType>
struct mapbase_evaluator : evaluator_base<Derived>
{
  typedef Derived  XprType;
  typedef typename XprType::PointerType PointerType;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    // NOTE(review): IsRowMajor is assigned RowsAtCompileTime, which looks
    // like a copy-paste slip; it is unused in this struct (rowStride/colStride
    // use XprType::IsRowMajor instead) — confirm before relying on it.
    IsRowMajor = XprType::RowsAtCompileTime,
    ColsAtCompileTime = XprType::ColsAtCompileTime,
    CoeffReadCost = NumTraits<Scalar>::ReadCost
  };

  EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map)
    : m_data(const_cast<PointerType>(map.data())),
      m_innerStride(map.innerStride()),
      m_outerStride(map.outerStride())
  {
    // Packet access is only valid when the inner stride is statically 1.
    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
                        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_data[col * colStride() + row * rowStride()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_data[index * m_innerStride.value()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_data[col * colStride() + row * rowStride()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_data[index * m_innerStride.value()];
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    return internal::ploadt<PacketType, LoadMode>(ptr);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    PointerType ptr = m_data + row * rowStride() + col * colStride();
    return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
  }

protected:
  // Stride helpers: majorness decides which stored stride applies to rows vs columns.
  EIGEN_DEVICE_FUNC
  inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }
  EIGEN_DEVICE_FUNC
  inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }

  PointerType m_data;
  const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
  const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
};

// Evaluator for Map: resolves the effective compile-time strides from the
// StrideType (0 meaning "inherit from the plain object"), then masks out
// linear/packet access when the strides forbid them.
template<typename PlainObjectType, int MapOptions, typename StrideType>
struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
  : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
{
  typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
  typedef typename XprType::Scalar Scalar;
  // TODO: should check for smaller packet types once we can handle multi-sized packet types
  typedef typename packet_traits<Scalar>::type PacketScalar;

  enum {
    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
                             ? int(PlainObjectType::InnerStrideAtCompileTime)
                             : int(StrideType::InnerStrideAtCompileTime),
    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
                             ? int(PlainObjectType::OuterStrideAtCompileTime)
                             : int(StrideType::OuterStrideAtCompileTime),
    HasNoInnerStride = InnerStrideAtCompileTime == 1,
    HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
    HasNoStride = HasNoInnerStride && HasNoOuterStride,
    IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,

    PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
    LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
    Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),

    // Alignment is whatever the Map was declared with.
    Alignment = int(MapOptions)&int(AlignedMask)
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
    : mapbase_evaluator<XprType, PlainObjectType>(map)
  { }
};

// -------------------- Ref --------------------

// Evaluator for Ref: identical access machinery to the equivalent Map.
template<typename PlainObjectType, int RefOptions, typename StrideType>
struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
  : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
{
  typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;

  enum {
    Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
    Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref)
    : mapbase_evaluator<XprType, PlainObjectType>(ref)
  { }
};

// -------------------- Block --------------------

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
         bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;

// Evaluator for Block: computes the block's compile-time geometry (majorness,
// strides, alignment) and then dispatches to block_evaluator, which picks
// direct memory access when available or a coefficient-forwarding fallback.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
  : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
  typedef typename XprType::Scalar Scalar;
  // TODO: should check for smaller packet types once we can handle multi-sized packet types
  typedef typename packet_traits<Scalar>::type PacketScalar;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,

    RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
    ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
    MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,

    ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
    // A 1-row block is treated as row-major, a 1-column block as column-major;
    // otherwise the block inherits the argument's majorness.
    IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
               : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
               : ArgTypeIsRowMajor,
    HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
    // When storage orders differ, inner and outer strides swap roles.
    InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
                             ? int(inner_stride_at_compile_time<ArgType>::ret)
                             : int(outer_stride_at_compile_time<ArgType>::ret),
    OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
                             ? int(outer_stride_at_compile_time<ArgType>::ret)
                             : int(inner_stride_at_compile_time<ArgType>::ret),
    MaskPacketAccessBit = (InnerStrideAtCompileTime == 1) ? PacketAccessBit : 0,

    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
    FlagsRowMajorBit = XprType::Flags&RowMajorBit,
    Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
                                           DirectAccessBit |
                                           MaskPacketAccessBit),
    Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,

    PacketAlignment = unpacket_traits<PacketScalar>::alignment,
    // An inner panel whose static outer stride is a multiple of the packet
    // alignment preserves the argument's alignment.
    Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
                             && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
  };
  typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }
};

// no direct-access => dispatch to a unary evaluator
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
  : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;

  EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
    : unary_evaluator<XprType>(block)
  {}
};

// Coefficient-forwarding Block evaluator: every access is translated by the
// block's (startRow, startCol) offset into the nested expression's evaluator.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
  : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
{
  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)
    : m_argImpl(block.nestedExpression()),
      m_startRow(block.startRow()),
      m_startCol(block.startCol())
  { }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    RowsAtCompileTime = XprType::RowsAtCompileTime
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
  }

  // Linear index maps to a row offset for column-blocks (one column) and a
  // column offset for row-blocks (one row).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                       RowsAtCompileTime == 1 ? index : 0);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
                                             RowsAtCompileTime == 1 ? index : 0,
                                             x);
  }

protected:
  evaluator<ArgType> m_argImpl;
  // Offsets collapse to compile-time 0 for degenerate 1x1-compatible cases.
  const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
  const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
};

// TODO: This evaluator does not actually use the child evaluator;
// all action is via the data() as returned by the Block expression.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true> : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject> { typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType; typedef typename XprType::Scalar Scalar; EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block) : mapbase_evaluator<XprType, typename XprType::PlainObject>(block) { // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned"); } }; // -------------------- Select -------------------- // NOTE shall we introduce a ternary_evaluator? // TODO enable vectorization for Select template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> > : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> > { typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType; enum { CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost, evaluator<ElseMatrixType>::CoeffReadCost), Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits, Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment) }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select) : m_conditionImpl(select.conditionMatrix()), m_thenImpl(select.thenMatrix()), m_elseImpl(select.elseMatrix()) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { if (m_conditionImpl.coeff(row, col)) return m_thenImpl.coeff(row, col); else return m_elseImpl.coeff(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { if (m_conditionImpl.coeff(index)) return m_thenImpl.coeff(index); else return m_elseImpl.coeff(index); } protected: evaluator<ConditionMatrixType> m_conditionImpl; evaluator<ThenMatrixType> m_thenImpl; evaluator<ElseMatrixType> m_elseImpl; }; // -------------------- Replicate -------------------- template<typename ArgType, int RowFactor, int ColFactor> struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> > : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> > { typedef Replicate<ArgType, RowFactor, ColFactor> XprType; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor }; typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested; typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned; enum { CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost, LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0, Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit), Alignment = evaluator<ArgTypeNestedCleaned>::Alignment }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate) : m_arg(replicate.nestedExpression()), m_argImpl(m_arg), m_rows(replicate.nestedExpression().rows()), m_cols(replicate.nestedExpression().cols()) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? 
row : row % m_rows.value(); const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col % m_cols.value(); return m_argImpl.coeff(actual_row, actual_col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1 ? (ColFactor==1 ? index : index%m_cols.value()) : (RowFactor==1 ? index : index%m_rows.value()); return m_argImpl.coeff(actual_index); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row % m_rows.value(); const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col % m_cols.value(); return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1 ? (ColFactor==1 ? index : index%m_cols.value()) : (RowFactor==1 ? 
index : index%m_rows.value()); return m_argImpl.template packet<LoadMode,PacketType>(actual_index); } protected: const ArgTypeNested m_arg; evaluator<ArgTypeNestedCleaned> m_argImpl; const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows; const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols; }; // -------------------- PartialReduxExpr -------------------- template< typename ArgType, typename MemberOp, int Direction> struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> > : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> > { typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType; typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested; typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned; typedef typename ArgType::Scalar InputScalar; typedef typename XprType::Scalar Scalar; enum { TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime) }; typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType; enum { CoeffReadCost = TraversalSize==Dynamic ? HugeCost : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value), Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))) | LinearAccessBit, Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr) : m_arg(xpr.nestedExpression()), m_functor(xpr.functor()) { EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? 
HugeCost : int(CostOpType::value)); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index i, Index j) const { if (Direction==Vertical) return m_functor(m_arg.col(j)); else return m_functor(m_arg.row(i)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index index) const { if (Direction==Vertical) return m_functor(m_arg.col(index)); else return m_functor(m_arg.row(index)); } protected: typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg; const MemberOp m_functor; }; // -------------------- MatrixWrapper and ArrayWrapper -------------------- // // evaluator_wrapper_base<T> is a common base class for the // MatrixWrapper and ArrayWrapper evaluators. template<typename XprType> struct evaluator_wrapper_base : evaluator_base<XprType> { typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = evaluator<ArgType>::Flags, Alignment = evaluator<ArgType>::Alignment }; EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} typedef typename ArgType::Scalar Scalar; typedef typename ArgType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet<LoadMode,PacketType>(row, col); } template<int LoadMode, typename PacketType> 
EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_argImpl.template packet<LoadMode,PacketType>(index); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { m_argImpl.template writePacket<StoreMode>(row, col, x); } template<int StoreMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { m_argImpl.template writePacket<StoreMode>(index, x); } protected: evaluator<ArgType> m_argImpl; }; template<typename TArgType> struct unary_evaluator<MatrixWrapper<TArgType> > : evaluator_wrapper_base<MatrixWrapper<TArgType> > { typedef MatrixWrapper<TArgType> XprType; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper) : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression()) { } }; template<typename TArgType> struct unary_evaluator<ArrayWrapper<TArgType> > : evaluator_wrapper_base<ArrayWrapper<TArgType> > { typedef ArrayWrapper<TArgType> XprType; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper) : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression()) { } }; // -------------------- Reverse -------------------- // defined in Reverse.h: template<typename PacketType, bool ReversePacket> struct reverse_packet_cond; template<typename ArgType, int Direction> struct unary_evaluator<Reverse<ArgType, Direction> > : evaluator_base<Reverse<ArgType, Direction> > { typedef Reverse<ArgType, Direction> XprType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = XprType::IsRowMajor, IsColMajor = !IsRowMajor, ReverseRow = (Direction == Vertical) || (Direction == BothDirections), ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) || ((Direction == Horizontal) && IsRowMajor), CoeffReadCost = 
evaluator<ArgType>::CoeffReadCost, // let's enable LinearAccess only with vectorization because of the product overhead // FIXME enable DirectAccess with negative strides? Flags0 = evaluator<ArgType>::Flags, LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) ) || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1)) ? LinearAccessBit : 0, Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess), Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f. }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse) : m_argImpl(reverse.nestedExpression()), m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1), m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { enum { PacketSize = unpacket_traits<PacketType>::size, OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 }; typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet; return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>( ReverseRow ? 
m_rows.value() - row - OffsetRow : row, ReverseCol ? m_cols.value() - col - OffsetCol : col)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE PacketType packet(Index index) const { enum { PacketSize = unpacket_traits<PacketType>::size }; return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { // FIXME we could factorize some code with packet(i,j) enum { PacketSize = unpacket_traits<PacketType>::size, OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 }; typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet; m_argImpl.template writePacket<LoadMode>( ReverseRow ? m_rows.value() - row - OffsetRow : row, ReverseCol ? m_cols.value() - col - OffsetCol : col, reverse_packet::run(x)); } template<int LoadMode, typename PacketType> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { enum { PacketSize = unpacket_traits<PacketType>::size }; m_argImpl.template writePacket<LoadMode> (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x)); } protected: evaluator<ArgType> m_argImpl; // If we do not reverse rows, then we do not need to know the number of rows; same for columns // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors. const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows; const variable_if_dynamic<Index, ReverseCol ? 
ArgType::ColsAtCompileTime : 1> m_cols; }; // -------------------- Diagonal -------------------- template<typename ArgType, int DiagIndex> struct evaluator<Diagonal<ArgType, DiagIndex> > : evaluator_base<Diagonal<ArgType, DiagIndex> > { typedef Diagonal<ArgType, DiagIndex> XprType; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit, Alignment = 0 }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal) : m_argImpl(diagonal.nestedExpression()), m_index(diagonal.index()) { } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index) const { return m_argImpl.coeff(row + rowOffset(), row + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index + rowOffset(), index + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index) { return m_argImpl.coeffRef(row + rowOffset(), row + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index + rowOffset(), index + colOffset()); } protected: evaluator<ArgType> m_argImpl; const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index; private: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? 
m_index.value() : 0; } }; //---------------------------------------------------------------------- // deprecated code //---------------------------------------------------------------------- // -------------------- EvalToTemp -------------------- // expression class for evaluating nested expression to a temporary template<typename ArgType> class EvalToTemp; template<typename ArgType> struct traits<EvalToTemp<ArgType> > : public traits<ArgType> { }; template<typename ArgType> class EvalToTemp : public dense_xpr_base<EvalToTemp<ArgType> >::type { public: typedef typename dense_xpr_base<EvalToTemp>::type Base; EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp) explicit EvalToTemp(const ArgType& arg) : m_arg(arg) { } const ArgType& arg() const { return m_arg; } Index rows() const { return m_arg.rows(); } Index cols() const { return m_arg.cols(); } private: const ArgType& m_arg; }; template<typename ArgType> struct evaluator<EvalToTemp<ArgType> > : public evaluator<typename ArgType::PlainObject> { typedef EvalToTemp<ArgType> XprType; typedef typename ArgType::PlainObject PlainObject; typedef evaluator<PlainObject> Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.arg()) { ::new (static_cast<Base*>(this)) Base(m_result); } // This constructor is used when nesting an EvalTo evaluator in another evaluator EIGEN_DEVICE_FUNC evaluator(const ArgType& arg) : m_result(arg) { ::new (static_cast<Base*>(this)) Base(m_result); } protected: PlainObject m_result; }; } // namespace internal } // end namespace Eigen #endif // EIGEN_COREEVALUATORS_H
61,293
35.659091
188
h
abess
abess-master/python/include/Eigen/src/Core/CoreIterators.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COREITERATORS_H #define EIGEN_COREITERATORS_H namespace Eigen { /* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core */ namespace internal { template<typename XprType, typename EvaluatorKind> class inner_iterator_selector; } /** \class InnerIterator * \brief An InnerIterator allows to loop over the element of any matrix expression. * * \warning To be used with care because an evaluator is constructed every time an InnerIterator iterator is constructed. * * TODO: add a usage example */ template<typename XprType> class InnerIterator { protected: typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType; typedef internal::evaluator<XprType> EvaluatorType; typedef typename internal::traits<XprType>::Scalar Scalar; public: /** Construct an iterator over the \a outerId -th row or column of \a xpr */ InnerIterator(const XprType &xpr, const Index &outerId) : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize()) {} /// \returns the value of the current coefficient. EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); } /** Increment the iterator \c *this to the next non-zero coefficient. * Explicit zeros are not skipped over. To skip explicit zeros, see class SparseView */ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_iter.operator++(); return *this; } /// \returns the column or row index of the current coefficient. EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); } /// \returns the row index of the current coefficient. 
EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); } /// \returns the column index of the current coefficient. EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); } /// \returns \c true if the iterator \c *this still references a valid coefficient. EIGEN_STRONG_INLINE operator bool() const { return m_iter; } protected: EvaluatorType m_eval; IteratorType m_iter; private: // If you get here, then you're not using the right InnerIterator type, e.g.: // SparseMatrix<double,RowMajor> A; // SparseMatrix<double>::InnerIterator it(A,0); template<typename T> InnerIterator(const EigenBase<T>&,Index outer); }; namespace internal { // Generic inner iterator implementation for dense objects template<typename XprType> class inner_iterator_selector<XprType, IndexBased> { protected: typedef evaluator<XprType> EvaluatorType; typedef typename traits<XprType>::Scalar Scalar; enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize) : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize) {} EIGEN_STRONG_INLINE Scalar value() const { return (IsRowMajor) ? m_eval.coeff(m_outer, m_inner) : m_eval.coeff(m_inner, m_outer); } EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; } EIGEN_STRONG_INLINE Index index() const { return m_inner; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? 
index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } protected: const EvaluatorType& m_eval; Index m_inner; const Index m_outer; const Index m_end; }; // For iterator-based evaluator, inner-iterator is already implemented as // evaluator<>::InnerIterator template<typename XprType> class inner_iterator_selector<XprType, IteratorBased> : public evaluator<XprType>::InnerIterator { protected: typedef typename evaluator<XprType>::InnerIterator Base; typedef evaluator<XprType> EvaluatorType; public: EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/) : Base(eval, outerId) {} }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_COREITERATORS_H
4,525
34.359375
122
h
abess
abess-master/python/include/Eigen/src/Core/CwiseBinaryOp.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_BINARY_OP_H #define EIGEN_CWISE_BINARY_OP_H namespace Eigen { namespace internal { template<typename BinaryOp, typename Lhs, typename Rhs> struct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > { // we must not inherit from traits<Lhs> since it has // the potential to cause problems with MSVC typedef typename remove_all<Lhs>::type Ancestor; typedef typename traits<Ancestor>::XprKind XprKind; enum { RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime, ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime, MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime }; // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor), // we still want to handle the case when the result type is different. 
typedef typename result_of< BinaryOp( const typename Lhs::Scalar&, const typename Rhs::Scalar& ) >::type Scalar; typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind, BinaryOp>::ret StorageKind; typedef typename promote_index_type<typename traits<Lhs>::StorageIndex, typename traits<Rhs>::StorageIndex>::type StorageIndex; typedef typename Lhs::Nested LhsNested; typedef typename Rhs::Nested RhsNested; typedef typename remove_reference<LhsNested>::type _LhsNested; typedef typename remove_reference<RhsNested>::type _RhsNested; enum { Flags = cwise_promote_storage_order<typename traits<Lhs>::StorageKind,typename traits<Rhs>::StorageKind,_LhsNested::Flags & RowMajorBit,_RhsNested::Flags & RowMajorBit>::value }; }; } // end namespace internal template<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind> class CwiseBinaryOpImpl; /** \class CwiseBinaryOp * \ingroup Core_Module * * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions * * \tparam BinaryOp template functor implementing the operator * \tparam LhsType the type of the left-hand side * \tparam RhsType the type of the right-hand side * * This class represents an expression where a coefficient-wise binary operator is applied to two expressions. * It is the return type of binary operators, by which we mean only those binary operators where * both the left-hand side and the right-hand side are Eigen expressions. * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp. * * Most of the time, this is the only way that it is used, so you typically don't have to name * CwiseBinaryOp types explicitly. 
* * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp */ template<typename BinaryOp, typename LhsType, typename RhsType> class CwiseBinaryOp : public CwiseBinaryOpImpl< BinaryOp, LhsType, RhsType, typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind, typename internal::traits<RhsType>::StorageKind, BinaryOp>::ret>, internal::no_assignment_operator { public: typedef typename internal::remove_all<BinaryOp>::type Functor; typedef typename internal::remove_all<LhsType>::type Lhs; typedef typename internal::remove_all<RhsType>::type Rhs; typedef typename CwiseBinaryOpImpl< BinaryOp, LhsType, RhsType, typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind, typename internal::traits<Rhs>::StorageKind, BinaryOp>::ret>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp) typedef typename internal::ref_selector<LhsType>::type LhsNested; typedef typename internal::ref_selector<RhsType>::type RhsNested; typedef typename internal::remove_reference<LhsNested>::type _LhsNested; typedef typename internal::remove_reference<RhsNested>::type _RhsNested; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp()) : m_lhs(aLhs), m_rhs(aRhs), m_functor(func) { EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar); // require the sizes to match EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { // return the fixed size type if available to enable compile time optimizations if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic) return m_rhs.rows(); else return m_lhs.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { // return the fixed size type if 
available to enable compile time optimizations if (internal::traits<typename internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic) return m_rhs.cols(); else return m_lhs.cols(); } /** \returns the left hand side nested expression */ EIGEN_DEVICE_FUNC const _LhsNested& lhs() const { return m_lhs; } /** \returns the right hand side nested expression */ EIGEN_DEVICE_FUNC const _RhsNested& rhs() const { return m_rhs; } /** \returns the functor representing the binary operation */ EIGEN_DEVICE_FUNC const BinaryOp& functor() const { return m_functor; } protected: LhsNested m_lhs; RhsNested m_rhs; const BinaryOp m_functor; }; // Generic API dispatcher template<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind> class CwiseBinaryOpImpl : public internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type { public: typedef typename internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base; }; /** replaces \c *this by \c *this - \a other. * * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_STRONG_INLINE Derived & MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other) { call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } /** replaces \c *this by \c *this + \a other. * * \returns a reference to \c *this */ template<typename Derived> template<typename OtherDerived> EIGEN_STRONG_INLINE Derived & MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other) { call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>()); return derived(); } } // end namespace Eigen #endif // EIGEN_CWISE_BINARY_OP_H
7,593
40.048649
179
h
abess
abess-master/python/include/Eigen/src/Core/CwiseNullaryOp.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_NULLARY_OP_H #define EIGEN_CWISE_NULLARY_OP_H namespace Eigen { namespace internal { template<typename NullaryOp, typename PlainObjectType> struct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType> { enum { Flags = traits<PlainObjectType>::Flags & RowMajorBit }; }; } // namespace internal /** \class CwiseNullaryOp * \ingroup Core_Module * * \brief Generic expression of a matrix where all coefficients are defined by a functor * * \tparam NullaryOp template functor implementing the operator * \tparam PlainObjectType the underlying plain matrix/array type * * This class represents an expression of a generic nullary operator. * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods, * and most of the time this is the only way it is used. * * However, if you want to write a function returning such an expression, you * will need to use this class. 
* * The functor NullaryOp must expose one of the following method: <table class="manual"> <tr ><td>\c operator()() </td><td>if the procedural generation does not depend on the coefficient entries (e.g., random numbers)</td></tr> <tr class="alt"><td>\c operator()(Index i)</td><td>if the procedural generation makes sense for vectors only and that it depends on the coefficient index \c i (e.g., linspace) </td></tr> <tr ><td>\c operator()(Index i,Index j)</td><td>if the procedural generation depends on the matrix coordinates \c i, \c j (e.g., to generate a checkerboard with 0 and 1)</td></tr> </table> * It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized for vectors. * * See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding * C++11 random number generators. * * A nullary expression can also be used to implement custom sophisticated matrix manipulations * that cannot be covered by the existing set of natively supported matrix manipulations. * See this \ref TopicCustomizing_NullaryExpr "page" for some examples and additional explanations * on the behavior of CwiseNullaryOp. 
* * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr */ template<typename NullaryOp, typename PlainObjectType> class CwiseNullaryOp : public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp, PlainObjectType> >::type, internal::no_assignment_operator { public: typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) EIGEN_DEVICE_FUNC CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp()) : m_rows(rows), m_cols(cols), m_functor(func) { eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); } /** \returns the functor representing the nullary operation */ EIGEN_DEVICE_FUNC const NullaryOp& functor() const { return m_functor; } protected: const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows; const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols; const NullaryOp m_functor; }; /** \returns an expression of a matrix defined by a custom functor \a func * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. 
* * \sa class CwiseNullaryOp */ template<typename Derived> template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject> DenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) { return CwiseNullaryOp<CustomNullaryOp, PlainObject>(rows, cols, func); } /** \returns an expression of a matrix defined by a custom functor \a func * * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * Here is an example with C++11 random generators: \include random_cpp11.cpp * Output: \verbinclude random_cpp11.out * * \sa class CwiseNullaryOp */ template<typename Derived> template<typename CustomNullaryOp> EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject> DenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, PlainObject>(1, size, func); else return CwiseNullaryOp<CustomNullaryOp, PlainObject>(size, 1, func); } /** \returns an expression of a matrix defined by a custom functor \a func * * This variant is only for fixed-size DenseBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * The template parameter \a CustomNullaryOp is the type of the functor. 
* * \sa class CwiseNullaryOp */ template<typename Derived> template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject> DenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func) { return CwiseNullaryOp<CustomNullaryOp, PlainObject>(RowsAtCompileTime, ColsAtCompileTime, func); } /** \returns an expression of a constant matrix of value \a value * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this DenseBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template<typename Derived> EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value) { return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value)); } /** \returns an expression of a constant matrix of value \a value * * The parameter \a size is the size of the returned vector. * Must be compatible with this DenseBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * The template parameter \a CustomNullaryOp is the type of the functor. 
* * \sa class CwiseNullaryOp */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Constant(Index size, const Scalar& value) { return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value)); } /** \returns an expression of a constant matrix of value \a value * * This variant is only for fixed-size DenseBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * The template parameter \a CustomNullaryOp is the type of the functor. * * \sa class CwiseNullaryOp */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Constant(const Scalar& value) { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value)); } /** \deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&) * * \sa LinSpaced(Index,Scalar,Scalar), setLinSpaced(Index,const Scalar&,const Scalar&) */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType DenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size)); } /** \deprecated because of accuracy loss. 
In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&) * * \sa LinSpaced(Scalar,Scalar) */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType DenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime)); } /** * \brief Sets a linearly spaced vector. * * The function generates 'size' equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * * Example: \include DenseBase_LinSpaced.cpp * Output: \verbinclude DenseBase_LinSpaced.out * * For integer scalar types, an even spacing is possible if and only if the length of the range, * i.e., \c high-low is a scalar multiple of \c size-1, or if \c size is a scalar multiple of the * number of values \c high-low+1 (meaning each value can be repeated the same number of time). * If one of these two considions is not satisfied, then \c high is lowered to the largest value * satisfying one of this constraint. 
* Here are some examples: * * Example: \include DenseBase_LinSpacedInt.cpp * Output: \verbinclude DenseBase_LinSpacedInt.out * * \sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size)); } /** * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&) * Special version for fixed size types which does not require the size parameter. */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType DenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime)); } /** \returns true if all coefficients in this matrix are approximately equal to \a val, to within precision \a prec */ template<typename Derived> EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApproxToConstant (const Scalar& val, const RealScalar& prec) const { typename internal::nested_eval<Derived,1>::type self(derived()); for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if(!internal::isApprox(self.coeff(i, j), val, prec)) return false; return true; } /** This is just an alias for isApproxToConstant(). 
* * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ template<typename Derived> EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isConstant (const Scalar& val, const RealScalar& prec) const { return isApproxToConstant(val, prec); } /** Alias for setConstant(): sets all coefficients in this expression to \a val. * * \sa setConstant(), Constant(), class CwiseNullaryOp */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val) { setConstant(val); } /** Sets all coefficients in this expression to value \a val. * * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val) { return derived() = Constant(rows(), cols(), val); } /** Resizes to the given \a size, and sets all coefficients in this expression to the given value \a val. * * \only_for_vectors * * Example: \include Matrix_setConstant_int.cpp * Output: \verbinclude Matrix_setConstant_int.out * * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index size, const Scalar& val) { resize(size); return setConstant(val); } /** Resizes to the given size, and sets all coefficients in this expression to the given value \a val. 
* * \param rows the new number of rows * \param cols the new number of columns * \param val the value to which all coefficients are set * * Example: \include Matrix_setConstant_int_int.cpp * Output: \verbinclude Matrix_setConstant_int_int.out * * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& val) { resize(rows, cols); return setConstant(val); } /** * \brief Sets a linearly spaced vector. * * The function generates 'size' equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * * Example: \include DenseBase_setLinSpaced.cpp * Output: \verbinclude DenseBase_setLinSpaced.out * * For integer scalar types, do not miss the explanations on the definition * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink. * * \sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,PacketScalar>(low,high,newSize)); } /** * \brief Sets a linearly spaced vector. * * The function fills \c *this with equally spaced values in the closed interval [low,high]. * When size is set to 1, a vector of length 1 containing 'high' is returned. * * \only_for_vectors * * For integer scalar types, do not miss the explanations on the definition * of \link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \endlink. 
* * \sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return setLinSpaced(size(), low, high); } // zero: /** \returns an expression of a zero matrix. * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used * instead. * * Example: \include MatrixBase_zero_int_int.cpp * Output: \verbinclude MatrixBase_zero_int_int.out * * \sa Zero(), Zero(Index) */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Zero(Index rows, Index cols) { return Constant(rows, cols, Scalar(0)); } /** \returns an expression of a zero vector. * * The parameter \a size is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Zero() should be used * instead. * * Example: \include MatrixBase_zero_int.cpp * Output: \verbinclude MatrixBase_zero_int.out * * \sa Zero(), Zero(Index,Index) */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Zero(Index size) { return Constant(size, Scalar(0)); } /** \returns an expression of a fixed-size zero matrix or vector. * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variants taking size arguments. 
* * Example: \include MatrixBase_zero.cpp * Output: \verbinclude MatrixBase_zero.out * * \sa Zero(Index), Zero(Index,Index) */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Zero() { return Constant(Scalar(0)); } /** \returns true if *this is approximately equal to the zero matrix, * within the precision given by \a prec. * * Example: \include MatrixBase_isZero.cpp * Output: \verbinclude MatrixBase_isZero.out * * \sa class CwiseNullaryOp, Zero() */ template<typename Derived> EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isZero(const RealScalar& prec) const { typename internal::nested_eval<Derived,1>::type self(derived()); for(Index j = 0; j < cols(); ++j) for(Index i = 0; i < rows(); ++i) if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<Scalar>(1), prec)) return false; return true; } /** Sets all coefficients in this expression to zero. * * Example: \include MatrixBase_setZero.cpp * Output: \verbinclude MatrixBase_setZero.out * * \sa class CwiseNullaryOp, Zero() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero() { return setConstant(Scalar(0)); } /** Resizes to the given \a size, and sets all coefficients in this expression to zero. * * \only_for_vectors * * Example: \include Matrix_setZero_int.cpp * Output: \verbinclude Matrix_setZero_int.out * * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index newSize) { resize(newSize); return setConstant(Scalar(0)); } /** Resizes to the given size, and sets all coefficients in this expression to zero. 
* * \param rows the new number of rows * \param cols the new number of columns * * Example: \include Matrix_setZero_int_int.cpp * Output: \verbinclude Matrix_setZero_int_int.out * * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setZero(Index rows, Index cols) { resize(rows, cols); return setConstant(Scalar(0)); } // ones: /** \returns an expression of a matrix where all coefficients equal one. * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used * instead. * * Example: \include MatrixBase_ones_int_int.cpp * Output: \verbinclude MatrixBase_ones_int_int.out * * \sa Ones(), Ones(Index), isOnes(), class Ones */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones(Index rows, Index cols) { return Constant(rows, cols, Scalar(1)); } /** \returns an expression of a vector where all coefficients equal one. * * The parameter \a newSize is the size of the returned vector. * Must be compatible with this MatrixBase type. * * \only_for_vectors * * This variant is meant to be used for dynamic-size vector types. For fixed-size types, * it is redundant to pass \a size as argument, so Ones() should be used * instead. 
* * Example: \include MatrixBase_ones_int.cpp * Output: \verbinclude MatrixBase_ones_int.out * * \sa Ones(), Ones(Index,Index), isOnes(), class Ones */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones(Index newSize) { return Constant(newSize, Scalar(1)); } /** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variants taking size arguments. * * Example: \include MatrixBase_ones.cpp * Output: \verbinclude MatrixBase_ones.out * * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType DenseBase<Derived>::Ones() { return Constant(Scalar(1)); } /** \returns true if *this is approximately equal to the matrix where all coefficients * are equal to 1, within the precision given by \a prec. * * Example: \include MatrixBase_isOnes.cpp * Output: \verbinclude MatrixBase_isOnes.out * * \sa class CwiseNullaryOp, Ones() */ template<typename Derived> EIGEN_DEVICE_FUNC bool DenseBase<Derived>::isOnes (const RealScalar& prec) const { return isApproxToConstant(Scalar(1), prec); } /** Sets all coefficients in this expression to one. * * Example: \include MatrixBase_setOnes.cpp * Output: \verbinclude MatrixBase_setOnes.out * * \sa class CwiseNullaryOp, Ones() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes() { return setConstant(Scalar(1)); } /** Resizes to the given \a newSize, and sets all coefficients in this expression to one. 
* * \only_for_vectors * * Example: \include Matrix_setOnes_int.cpp * Output: \verbinclude Matrix_setOnes_int.out * * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index newSize) { resize(newSize); return setConstant(Scalar(1)); } /** Resizes to the given size, and sets all coefficients in this expression to one. * * \param rows the new number of rows * \param cols the new number of columns * * Example: \include Matrix_setOnes_int_int.cpp * Output: \verbinclude Matrix_setOnes_int_int.out * * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& PlainObjectBase<Derived>::setOnes(Index rows, Index cols) { resize(rows, cols); return setConstant(Scalar(1)); } // Identity: /** \returns an expression of the identity matrix (not necessarily square). * * The parameters \a rows and \a cols are the number of rows and of columns of * the returned matrix. Must be compatible with this MatrixBase type. * * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used * instead. * * Example: \include MatrixBase_identity_int_int.cpp * Output: \verbinclude MatrixBase_identity_int_int.out * * \sa Identity(), setIdentity(), isIdentity() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType MatrixBase<Derived>::Identity(Index rows, Index cols) { return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>()); } /** \returns an expression of the identity matrix (not necessarily square). * * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you * need to use the variant taking size arguments. 
* * Example: \include MatrixBase_identity.cpp * Output: \verbinclude MatrixBase_identity.out * * \sa Identity(Index,Index), setIdentity(), isIdentity() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType MatrixBase<Derived>::Identity() { EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>()); } /** \returns true if *this is approximately equal to the identity matrix * (not necessarily square), * within the precision given by \a prec. * * Example: \include MatrixBase_isIdentity.cpp * Output: \verbinclude MatrixBase_isIdentity.out * * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity() */ template<typename Derived> bool MatrixBase<Derived>::isIdentity (const RealScalar& prec) const { typename internal::nested_eval<Derived,1>::type self(derived()); for(Index j = 0; j < cols(); ++j) { for(Index i = 0; i < rows(); ++i) { if(i == j) { if(!internal::isApprox(self.coeff(i, j), static_cast<Scalar>(1), prec)) return false; } else { if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<RealScalar>(1), prec)) return false; } } } return true; } namespace internal { template<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)> struct setIdentity_impl { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) { return m = Derived::Identity(m.rows(), m.cols()); } }; template<typename Derived> struct setIdentity_impl<Derived, true> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Derived& run(Derived& m) { m.setZero(); const Index size = numext::mini(m.rows(), m.cols()); for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); return m; } }; } // end namespace internal /** Writes the identity expression (not necessarily square) into *this. 
* * Example: \include MatrixBase_setIdentity.cpp * Output: \verbinclude MatrixBase_setIdentity.out * * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity() { return internal::setIdentity_impl<Derived>::run(derived()); } /** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this. * * \param rows the new number of rows * \param cols the new number of columns * * Example: \include Matrix_setIdentity_int_int.cpp * Output: \verbinclude Matrix_setIdentity_int_int.out * * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols) { derived().resize(rows, cols); return setIdentity(); } /** \returns an expression of the i-th unit (basis) vector. * * \only_for_vectors * * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index newSize, Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i); } /** \returns an expression of the i-th unit (basis) vector. * * \only_for_vectors * * This variant is for fixed-size vector only. 
* * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i) { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) return BasisReturnType(SquareMatrixType::Identity(),i); } /** \returns an expression of the X axis unit vector (1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX() { return Derived::Unit(0); } /** \returns an expression of the Y axis unit vector (0,1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY() { return Derived::Unit(1); } /** \returns an expression of the Z axis unit vector (0,0,1{,0}^*) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ() { return Derived::Unit(2); } /** \returns an expression of the W axis unit vector (0,0,0,1) * * \only_for_vectors * * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() */ template<typename Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW() { return Derived::Unit(3); } } // end namespace Eigen #endif // EIGEN_CWISE_NULLARY_OP_H
31,424
35.245675
194
h
abess
abess-master/python/include/Eigen/src/Core/CwiseTernaryOp.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_TERNARY_OP_H #define EIGEN_CWISE_TERNARY_OP_H namespace Eigen { namespace internal { template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3> struct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > { // we must not inherit from traits<Arg1> since it has // the potential to cause problems with MSVC typedef typename remove_all<Arg1>::type Ancestor; typedef typename traits<Ancestor>::XprKind XprKind; enum { RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime, ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime, MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime }; // even though we require Arg1, Arg2, and Arg3 to have the same scalar type // (see CwiseTernaryOp constructor), // we still want to handle the case when the result type is different. 
typedef typename result_of<TernaryOp( const typename Arg1::Scalar&, const typename Arg2::Scalar&, const typename Arg3::Scalar&)>::type Scalar; typedef typename internal::traits<Arg1>::StorageKind StorageKind; typedef typename internal::traits<Arg1>::StorageIndex StorageIndex; typedef typename Arg1::Nested Arg1Nested; typedef typename Arg2::Nested Arg2Nested; typedef typename Arg3::Nested Arg3Nested; typedef typename remove_reference<Arg1Nested>::type _Arg1Nested; typedef typename remove_reference<Arg2Nested>::type _Arg2Nested; typedef typename remove_reference<Arg3Nested>::type _Arg3Nested; enum { Flags = _Arg1Nested::Flags & RowMajorBit }; }; } // end namespace internal template <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3, typename StorageKind> class CwiseTernaryOpImpl; /** \class CwiseTernaryOp * \ingroup Core_Module * * \brief Generic expression where a coefficient-wise ternary operator is * applied to two expressions * * \tparam TernaryOp template functor implementing the operator * \tparam Arg1Type the type of the first argument * \tparam Arg2Type the type of the second argument * \tparam Arg3Type the type of the third argument * * This class represents an expression where a coefficient-wise ternary * operator is applied to three expressions. * It is the return type of ternary operators, by which we mean only those * ternary operators where * all three arguments are Eigen expressions. * For example, the return type of betainc(matrix1, matrix2, matrix3) is a * CwiseTernaryOp. * * Most of the time, this is the only way that it is used, so you typically * don't have to name * CwiseTernaryOp types explicitly. 
* * \sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const * MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp, * class CwiseUnaryOp, class CwiseNullaryOp */ template <typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type> class CwiseTernaryOp : public CwiseTernaryOpImpl< TernaryOp, Arg1Type, Arg2Type, Arg3Type, typename internal::traits<Arg1Type>::StorageKind>, internal::no_assignment_operator { public: typedef typename internal::remove_all<Arg1Type>::type Arg1; typedef typename internal::remove_all<Arg2Type>::type Arg2; typedef typename internal::remove_all<Arg3Type>::type Arg3; typedef typename CwiseTernaryOpImpl< TernaryOp, Arg1Type, Arg2Type, Arg3Type, typename internal::traits<Arg1Type>::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp) typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested; typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested; typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested; typedef typename internal::remove_reference<Arg1Nested>::type _Arg1Nested; typedef typename internal::remove_reference<Arg2Nested>::type _Arg2Nested; typedef typename internal::remove_reference<Arg3Nested>::type _Arg3Nested; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2, const Arg3& a3, const TernaryOp& func = TernaryOp()) : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) { // require the sizes to match EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2) EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3) // The index types should match EIGEN_STATIC_ASSERT((internal::is_same< typename internal::traits<Arg1Type>::StorageKind, typename internal::traits<Arg2Type>::StorageKind>::value), STORAGE_KIND_MUST_MATCH) EIGEN_STATIC_ASSERT((internal::is_same< typename internal::traits<Arg1Type>::StorageKind, typename internal::traits<Arg3Type>::StorageKind>::value), STORAGE_KIND_MUST_MATCH) eigen_assert(a1.rows() == 
a2.rows() && a1.cols() == a2.cols() && a1.rows() == a3.rows() && a1.cols() == a3.cols()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { // return the fixed size type if available to enable compile time // optimizations if (internal::traits<typename internal::remove_all<Arg1Nested>::type>:: RowsAtCompileTime == Dynamic && internal::traits<typename internal::remove_all<Arg2Nested>::type>:: RowsAtCompileTime == Dynamic) return m_arg3.rows(); else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>:: RowsAtCompileTime == Dynamic && internal::traits<typename internal::remove_all<Arg3Nested>::type>:: RowsAtCompileTime == Dynamic) return m_arg2.rows(); else return m_arg1.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { // return the fixed size type if available to enable compile time // optimizations if (internal::traits<typename internal::remove_all<Arg1Nested>::type>:: ColsAtCompileTime == Dynamic && internal::traits<typename internal::remove_all<Arg2Nested>::type>:: ColsAtCompileTime == Dynamic) return m_arg3.cols(); else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>:: ColsAtCompileTime == Dynamic && internal::traits<typename internal::remove_all<Arg3Nested>::type>:: ColsAtCompileTime == Dynamic) return m_arg2.cols(); else return m_arg1.cols(); } /** \returns the first argument nested expression */ EIGEN_DEVICE_FUNC const _Arg1Nested& arg1() const { return m_arg1; } /** \returns the first argument nested expression */ EIGEN_DEVICE_FUNC const _Arg2Nested& arg2() const { return m_arg2; } /** \returns the third argument nested expression */ EIGEN_DEVICE_FUNC const _Arg3Nested& arg3() const { return m_arg3; } /** \returns the functor representing the ternary operation */ EIGEN_DEVICE_FUNC const TernaryOp& functor() const { return m_functor; } protected: Arg1Nested m_arg1; Arg2Nested m_arg2; Arg3Nested m_arg3; const TernaryOp m_functor; }; // Generic API dispatcher template <typename TernaryOp, 
typename Arg1, typename Arg2, typename Arg3, typename StorageKind> class CwiseTernaryOpImpl : public internal::generic_xpr_base< CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type { public: typedef typename internal::generic_xpr_base< CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type Base; }; } // end namespace Eigen #endif // EIGEN_CWISE_TERNARY_OP_H
8,256
40.70202
83
h
abess
abess-master/python/include/Eigen/src/Core/CwiseUnaryOp.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_UNARY_OP_H #define EIGEN_CWISE_UNARY_OP_H namespace Eigen { namespace internal { template<typename UnaryOp, typename XprType> struct traits<CwiseUnaryOp<UnaryOp, XprType> > : traits<XprType> { typedef typename result_of< UnaryOp(const typename XprType::Scalar&) >::type Scalar; typedef typename XprType::Nested XprTypeNested; typedef typename remove_reference<XprTypeNested>::type _XprTypeNested; enum { Flags = _XprTypeNested::Flags & RowMajorBit }; }; } template<typename UnaryOp, typename XprType, typename StorageKind> class CwiseUnaryOpImpl; /** \class CwiseUnaryOp * \ingroup Core_Module * * \brief Generic expression where a coefficient-wise unary operator is applied to an expression * * \tparam UnaryOp template functor implementing the operator * \tparam XprType the type of the expression to which we are applying the unary operator * * This class represents an expression where a unary operator is applied to an expression. * It is the return type of all operations taking exactly 1 input expression, regardless of the * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix * is considered unary, because only the right-hand side is an expression, and its * return type is a specialization of CwiseUnaryOp. * * Most of the time, this is the only way that it is used, so you typically don't have to name * CwiseUnaryOp types explicitly. 
* * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp */ template<typename UnaryOp, typename XprType> class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>, internal::no_assignment_operator { public: typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename internal::traits<XprType>::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp) typedef typename internal::ref_selector<XprType>::type XprTypeNested; typedef typename internal::remove_all<XprType>::type NestedExpression; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) : m_xpr(xpr), m_functor(func) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); } /** \returns the functor representing the unary operation */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const UnaryOp& functor() const { return m_functor; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const { return m_xpr; } /** \returns the nested expression */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::remove_all<XprTypeNested>::type& nestedExpression() { return m_xpr; } protected: XprTypeNested m_xpr; const UnaryOp m_functor; }; // Generic API dispatcher template<typename UnaryOp, typename XprType, typename StorageKind> class CwiseUnaryOpImpl : public internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type { public: typedef typename internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base; }; } // end namespace Eigen #endif // EIGEN_CWISE_UNARY_OP_H
3,877
36.288462
145
h
abess
abess-master/python/include/Eigen/src/Core/CwiseUnaryView.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CWISE_UNARY_VIEW_H #define EIGEN_CWISE_UNARY_VIEW_H namespace Eigen { namespace internal { template<typename ViewOp, typename MatrixType> struct traits<CwiseUnaryView<ViewOp, MatrixType> > : traits<MatrixType> { typedef typename result_of< ViewOp(const typename traits<MatrixType>::Scalar&) >::type Scalar; typedef typename MatrixType::Nested MatrixTypeNested; typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested; enum { FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0, Flags = traits<_MatrixTypeNested>::Flags & (RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions MatrixTypeInnerStride = inner_stride_at_compile_time<MatrixType>::ret, // need to cast the sizeof's from size_t to int explicitly, otherwise: // "error: no integral type can represent all of the enumerator values InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic ? int(Dynamic) : int(MatrixTypeInnerStride) * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)), OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret == Dynamic ? 
int(Dynamic) : outer_stride_at_compile_time<MatrixType>::ret * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)) }; }; } template<typename ViewOp, typename MatrixType, typename StorageKind> class CwiseUnaryViewImpl; /** \class CwiseUnaryView * \ingroup Core_Module * * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector * * \tparam ViewOp template functor implementing the view * \tparam MatrixType the type of the matrix we are applying the unary operator * * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector. * It is the return type of real() and imag(), and most of the time this is the only way it is used. * * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp */ template<typename ViewOp, typename MatrixType> class CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind> { public: typedef typename CwiseUnaryViewImpl<ViewOp, MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base; EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView) typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested; typedef typename internal::remove_all<MatrixType>::type NestedExpression; explicit inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp()) : m_matrix(mat), m_functor(func) {} EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView) EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); } EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); } /** \returns the functor representing unary operation */ const ViewOp& functor() const { return m_functor; } /** \returns the nested expression */ const typename internal::remove_all<MatrixTypeNested>::type& nestedExpression() const { return m_matrix; } /** \returns the nested expression */ typename internal::remove_reference<MatrixTypeNested>::type& nestedExpression() { return 
m_matrix.const_cast_derived(); } protected: MatrixTypeNested m_matrix; ViewOp m_functor; }; // Generic API dispatcher template<typename ViewOp, typename XprType, typename StorageKind> class CwiseUnaryViewImpl : public internal::generic_xpr_base<CwiseUnaryView<ViewOp, XprType> >::type { public: typedef typename internal::generic_xpr_base<CwiseUnaryView<ViewOp, XprType> >::type Base; }; template<typename ViewOp, typename MatrixType> class CwiseUnaryViewImpl<ViewOp,MatrixType,Dense> : public internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type { public: typedef CwiseUnaryView<ViewOp, MatrixType> Derived; typedef typename internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base; EIGEN_DENSE_PUBLIC_INTERFACE(Derived) EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl) EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); } EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeff(0)); } EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar); } EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar); } }; } // end namespace Eigen #endif // EIGEN_CWISE_UNARY_VIEW_H
5,282
39.953488
158
h
abess
abess-master/python/include/Eigen/src/Core/DenseBase.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com> // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DENSEBASE_H #define EIGEN_DENSEBASE_H namespace Eigen { namespace internal { // The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type. // This dummy function simply aims at checking that at compile time. static inline void check_DenseIndex_is_signed() { EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); } } // end namespace internal /** \class DenseBase * \ingroup Core_Module * * \brief Base class for all dense matrices, vectors, and arrays * * This class is the base that is inherited by all dense objects (matrix, vector, arrays, * and related expression types). The common Eigen API for dense objects is contained in this class. * * \tparam Derived is the derived type, e.g., a matrix type or an expression. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN. * * \sa \blank \ref TopicClassHierarchy */ template<typename Derived> class DenseBase #ifndef EIGEN_PARSED_BY_DOXYGEN : public DenseCoeffsBase<Derived> #else : public DenseCoeffsBase<Derived,DirectWriteAccessors> #endif // not EIGEN_PARSED_BY_DOXYGEN { public: /** Inner iterator type to iterate over the coefficients of a row or column. 
* \sa class InnerIterator */ typedef Eigen::InnerIterator<Derived> InnerIterator; typedef typename internal::traits<Derived>::StorageKind StorageKind; /** * \brief The type used to store indices * \details This typedef is relevant for types that store multiple indices such as * PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index * \sa \blank \ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase. */ typedef typename internal::traits<Derived>::StorageIndex StorageIndex; /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc. */ typedef typename internal::traits<Derived>::Scalar Scalar; /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc. * * It is an alias for the Scalar type */ typedef Scalar value_type; typedef typename NumTraits<Scalar>::Real RealScalar; typedef DenseCoeffsBase<Derived> Base; using Base::derived; using Base::const_cast_derived; using Base::rows; using Base::cols; using Base::size; using Base::rowIndexByOuterInner; using Base::colIndexByOuterInner; using Base::coeff; using Base::coeffByOuterInner; using Base::operator(); using Base::operator[]; using Base::x; using Base::y; using Base::z; using Base::w; using Base::stride; using Base::innerStride; using Base::outerStride; using Base::rowStride; using Base::colStride; typedef typename Base::CoeffReturnType CoeffReturnType; enum { RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime, /**< The number of rows at compile-time. This is just a copy of the value provided * by the \a Derived type. If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime, /**< The number of columns at compile-time. This is just a copy of the value provided * by the \a Derived type. 
If a value is not known at compile-time, * it is set to the \a Dynamic constant. * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime>::ret), /**< This is equal to the number of coefficients, i.e. the number of * rows times the number of columns, or to \a Dynamic if this is not * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime, /**< This value is equal to the maximum possible number of rows that this expression * might have. If this expression might have an arbitrarily high number of rows, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime */ MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime, /**< This value is equal to the maximum possible number of columns that this expression * might have. If this expression might have an arbitrarily high number of columns, * this value is set to \a Dynamic. * * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime */ MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime>::ret), /**< This value is equal to the maximum possible number of coefficients that this expression * might have. If this expression might have an arbitrarily high number of coefficients, * this value is set to \a Dynamic. 
* * This value is useful to know when evaluating an expression, in order to determine * whether it is possible to avoid doing a dynamic memory allocation. * * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime */ IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1 || internal::traits<Derived>::MaxColsAtCompileTime == 1, /**< This is set to true if either the number of rows or the number of * columns is known at compile-time to be equal to 1. Indeed, in that case, * we are dealing with a column-vector (if there is only one column) or with * a row-vector (if there is only one row). */ Flags = internal::traits<Derived>::Flags, /**< This stores expression \ref flags flags which may or may not be inherited by new expressions * constructed from this one. See the \ref flags "list of flags". */ IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */ InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret, OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret }; typedef typename internal::find_best_packet<Scalar,SizeAtCompileTime>::type PacketScalar; enum { IsPlainObjectBase = 0 }; /** The plain matrix type corresponding to this expression. * \sa PlainObject */ typedef Matrix<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime, AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor), internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime > PlainMatrix; /** The plain array type corresponding to this expression. 
* \sa PlainObject */ typedef Array<typename internal::traits<Derived>::Scalar, internal::traits<Derived>::RowsAtCompileTime, internal::traits<Derived>::ColsAtCompileTime, AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor), internal::traits<Derived>::MaxRowsAtCompileTime, internal::traits<Derived>::MaxColsAtCompileTime > PlainArray; /** \brief The plain matrix or array type corresponding to this expression. * * This is not necessarily exactly the return type of eval(). In the case of plain matrices, * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed * that the return type of eval() is either PlainObject or const PlainObject&. */ typedef typename internal::conditional<internal::is_same<typename internal::traits<Derived>::XprKind,MatrixXpr >::value, PlainMatrix, PlainArray>::type PlainObject; /** \returns the number of nonzero coefficients which is in practice the number * of stored coefficients. */ EIGEN_DEVICE_FUNC inline Index nonZeros() const { return size(); } /** \returns the outer size. * * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a * column-major matrix, and the number of rows for a row-major matrix. */ EIGEN_DEVICE_FUNC Index outerSize() const { return IsVectorAtCompileTime ? 1 : int(IsRowMajor) ? this->rows() : this->cols(); } /** \returns the inner size. * * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a * column-major matrix, and the number of columns for a row-major matrix. */ EIGEN_DEVICE_FUNC Index innerSize() const { return IsVectorAtCompileTime ? this->size() : int(IsRowMajor) ? 
this->cols() : this->rows(); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. */ EIGEN_DEVICE_FUNC void resize(Index newSize) { EIGEN_ONLY_USED_FOR_DEBUG(newSize); eigen_assert(newSize == this->size() && "DenseBase::resize() does not actually allow to resize."); } /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does * nothing else. */ EIGEN_DEVICE_FUNC void resize(Index rows, Index cols) { EIGEN_ONLY_USED_FOR_DEBUG(rows); EIGEN_ONLY_USED_FOR_DEBUG(cols); eigen_assert(rows == this->rows() && cols == this->cols() && "DenseBase::resize() does not actually allow to resize."); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal Represents a matrix with all coefficients equal to one another*/ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType; /** \internal \deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */ typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> SequentialLinSpacedReturnType; /** \internal Represents a vector with linearly spaced coefficients that allows random access. */ typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> RandomAccessLinSpacedReturnType; /** \internal the return type of MatrixBase::eigenvalues() */ typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType; #endif // not EIGEN_PARSED_BY_DOXYGEN /** Copies \a other into *this. \returns a reference to *this. 
*/ template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other); /** Special case of the template operator=, in order to prevent the compiler * from generating a default operator= (issue hit with g++ 4.1) */ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase& other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const EigenBase<OtherDerived> &other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator+=(const EigenBase<OtherDerived> &other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator-=(const EigenBase<OtherDerived> &other); template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const ReturnByValue<OtherDerived>& func); /** \internal * Copies \a other into *this without evaluating other. \returns a reference to *this. * \deprecated */ template<typename OtherDerived> EIGEN_DEVICE_FUNC Derived& lazyAssign(const DenseBase<OtherDerived>& other); EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<< (const Scalar& s); /** \deprecated it now returns \c *this */ template<unsigned int Added,unsigned int Removed> EIGEN_DEPRECATED const Derived& flagged() const { return derived(); } template<typename OtherDerived> EIGEN_DEVICE_FUNC CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other); typedef Transpose<Derived> TransposeReturnType; EIGEN_DEVICE_FUNC TransposeReturnType transpose(); typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType; EIGEN_DEVICE_FUNC ConstTransposeReturnType transpose() const; EIGEN_DEVICE_FUNC void transposeInPlace(); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index rows, Index cols, const Scalar& value); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(Index size, const Scalar& value); EIGEN_DEVICE_FUNC static const ConstantReturnType Constant(const Scalar& value); EIGEN_DEVICE_FUNC static const 
// ---------------------------------------------------------------------------
// Interior and end of the Eigen::DenseBase<Derived> class declaration.
// The class header and the earlier part of the declaration are above this
// chunk; the first declaration below is the tail of a LinSpaced() overload
// whose leading tokens (presumably `EIGEN_DEVICE_FUNC static const`) are
// outside this view -- do not reformat it into a standalone declaration.
// ---------------------------------------------------------------------------

// Continuation of a LinSpaced() overload declaration started above this chunk.
SequentialLinSpacedReturnType LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high);
EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(Index size, const Scalar& low, const Scalar& high);
EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);
EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType LinSpaced(const Scalar& low, const Scalar& high);

// Expression factories built from an arbitrary nullary functor.
template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC
static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);
template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC
static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(Index size, const CustomNullaryOp& func);
template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC
static const CwiseNullaryOp<CustomNullaryOp, PlainObject> NullaryExpr(const CustomNullaryOp& func);

// Constant-valued expression factories (2D, 1D, and fixed-size variants).
EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index rows, Index cols);
EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index size);
EIGEN_DEVICE_FUNC static const ConstantReturnType Zero();
EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols);
EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size);
EIGEN_DEVICE_FUNC static const ConstantReturnType Ones();

// In-place setters; the set*() members return Derived& (allows chaining).
EIGEN_DEVICE_FUNC void fill(const Scalar& value);
EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value);
EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);
EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high);
EIGEN_DEVICE_FUNC Derived& setZero();
EIGEN_DEVICE_FUNC Derived& setOnes();
EIGEN_DEVICE_FUNC Derived& setRandom();

// Fuzzy comparisons; each takes a precision defaulting to the scalar
// type's dummy_precision().
template<typename OtherDerived> EIGEN_DEVICE_FUNC
bool isApprox(const DenseBase<OtherDerived>& other,
              const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
EIGEN_DEVICE_FUNC
bool isMuchSmallerThan(const RealScalar& other,
                       const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
template<typename OtherDerived> EIGEN_DEVICE_FUNC
bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,
                       const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;
EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;

inline bool hasNaN() const;
inline bool allFinite() const;

// Compound scalar multiply / divide on the whole expression.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*=(const Scalar& other);
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator/=(const Scalar& other);

typedef typename internal::add_const_on_value_type<typename internal::eval<Derived>::type>::type EvalReturnType;

/** \returns the matrix or vector obtained by evaluating this expression.
  *
  * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
  * a const reference, in order to avoid a useless copy.
  *
  * \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
  */
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvalReturnType eval() const
{
  // Even though MSVC does not honor strong inlining when the return type
  // is a dynamic matrix, we desperately need strong inlining for fixed
  // size types on MSVC.
  return typename internal::eval<Derived>::type(derived());
}

/** swaps *this with the expression \a other.
  */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
void swap(const DenseBase<OtherDerived>& other)
{
  // Reject read-only expressions at compile time; then verify shapes match
  // before performing the element-wise swap assignment.
  EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
  eigen_assert(rows()==other.rows() && cols()==other.cols());
  call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());
}

/** swaps *this with the matrix or array \a other.
  */
template<typename OtherDerived>
EIGEN_DEVICE_FUNC
void swap(PlainObjectBase<OtherDerived>& other)
{
  eigen_assert(rows()==other.rows() && cols()==other.cols());
  call_assignment(derived(), other.derived(), internal::swap_assign_op<Scalar>());
}

EIGEN_DEVICE_FUNC inline const NestByValue<Derived> nestByValue() const;
EIGEN_DEVICE_FUNC inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;
EIGEN_DEVICE_FUNC inline ForceAlignedAccess<Derived> forceAlignedAccess();
template<bool Enable> EIGEN_DEVICE_FUNC
inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const;
template<bool Enable> EIGEN_DEVICE_FUNC
inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();

// Whole-expression reductions.
EIGEN_DEVICE_FUNC Scalar sum() const;
EIGEN_DEVICE_FUNC Scalar mean() const;
EIGEN_DEVICE_FUNC Scalar trace() const;
EIGEN_DEVICE_FUNC Scalar prod() const;
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff() const;
EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff() const;
// Variants that also report the position of the extremal coefficient,
// either as (row, col) or as a linear index.
template<typename IndexType> EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;
template<typename IndexType> EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;
template<typename IndexType> EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;
template<typename IndexType> EIGEN_DEVICE_FUNC
typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;

// Generic reduction and visitor entry points.
template<typename BinaryOp> EIGEN_DEVICE_FUNC Scalar redux(const BinaryOp& func) const;
template<typename Visitor> EIGEN_DEVICE_FUNC void visit(Visitor& func) const;

/** \returns a WithFormat proxy object allowing to print a matrix with the given
  * format \a fmt.
  *
  * See class IOFormat for some examples.
  *
  * \sa class IOFormat, class WithFormat
  */
inline const WithFormat<Derived> format(const IOFormat& fmt) const
{
  return WithFormat<Derived>(derived(), fmt);
}

/** \returns the unique coefficient of a 1x1 expression */
EIGEN_DEVICE_FUNC
CoeffReturnType value() const
{
  EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)
  eigen_assert(this->rows() == 1 && this->cols() == 1);
  return derived().coeff(0,0);
}

EIGEN_DEVICE_FUNC bool all() const;
EIGEN_DEVICE_FUNC bool any() const;
EIGEN_DEVICE_FUNC Index count() const;

// Partial-reduction wrapper types for the rowwise()/colwise() API below.
typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;
typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;
typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;
typedef const VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;

/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
  *
  * Example: \include MatrixBase_rowwise.cpp
  * Output: \verbinclude MatrixBase_rowwise.out
  *
  * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
  */
//Code moved here due to a CUDA compiler bug
EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const
{
  return ConstRowwiseReturnType(derived());
}
EIGEN_DEVICE_FUNC RowwiseReturnType rowwise();

/** \returns a VectorwiseOp wrapper of *this providing additional partial reduction operations
  *
  * Example: \include MatrixBase_colwise.cpp
  * Output: \verbinclude MatrixBase_colwise.out
  *
  * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
  */
EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const
{
  return ConstColwiseReturnType(derived());
}
EIGEN_DEVICE_FUNC ColwiseReturnType colwise();

typedef CwiseNullaryOp<internal::scalar_random_op<Scalar>,PlainObject> RandomReturnType;
static const RandomReturnType Random(Index rows, Index cols);
static const RandomReturnType Random(Index size);
static const RandomReturnType Random();

// select(): three overloads covering expression/expression,
// expression/scalar, and scalar/expression branches.
template<typename ThenDerived,typename ElseDerived>
const Select<Derived,ThenDerived,ElseDerived>
select(const DenseBase<ThenDerived>& thenMatrix, const DenseBase<ElseDerived>& elseMatrix) const;
template<typename ThenDerived>
inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
select(const DenseBase<ThenDerived>& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const;
template<typename ElseDerived>
inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
select(const typename ElseDerived::Scalar& thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;

template<int p> RealScalar lpNorm() const;

// Compile-time-factor replication variant; the runtime variant follows.
template<int RowFactor, int ColFactor>
EIGEN_DEVICE_FUNC
const Replicate<Derived,RowFactor,ColFactor> replicate() const;

/**
  * \return an expression of the replication of \c *this
  *
  * Example: \include MatrixBase_replicate_int_int.cpp
  * Output: \verbinclude MatrixBase_replicate_int_int.out
  *
  * \sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate
  */
//Code moved here due to a CUDA compiler bug
EIGEN_DEVICE_FUNC
const Replicate<Derived, Dynamic, Dynamic> replicate(Index rowFactor, Index colFactor) const
{
  return Replicate<Derived, Dynamic, Dynamic>(derived(), rowFactor, colFactor);
}

typedef Reverse<Derived, BothDirections> ReverseReturnType;
typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;
EIGEN_DEVICE_FUNC ReverseReturnType reverse();
/** This is the const version of reverse(). */
//Code moved here due to a CUDA compiler bug
EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const
{
  return ConstReverseReturnType(derived());
}
EIGEN_DEVICE_FUNC void reverseInPlace();

// Inject the shared block-access members (BlockMethods.h) and any
// user-supplied plugin, with DenseBase as the current storage base class;
// the helper macros are undefined again immediately afterwards.
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
# include "../plugins/BlockMethods.h"
# ifdef EIGEN_DENSEBASE_PLUGIN
# include EIGEN_DENSEBASE_PLUGIN
# endif
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF

// disable the use of evalTo for dense objects with a nice compilation error
template<typename Dest>
EIGEN_DEVICE_FUNC
inline void evalTo(Dest& ) const
{
  EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);
}

protected:
/** Default constructor. Do nothing. */
EIGEN_DEVICE_FUNC DenseBase()
{
  /* Just checks for self-consistency of the flags.
   * Only do it when debugging Eigen, as this borders on paranoiac and could slow compilation down
   */
#ifdef EIGEN_INTERNAL_DEBUGGING
  EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))
                    && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))),
                      INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)
#endif
}

private:
// Declared but (from this view) not defined; presumably these private
// overloads exist to reject accidental construction from integers or from
// another DenseBase expression -- TODO confirm against the Eigen sources.
EIGEN_DEVICE_FUNC explicit DenseBase(int);
EIGEN_DEVICE_FUNC DenseBase(int,int);
template<typename OtherDerived> EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase<OtherDerived>&);
};

} // end namespace Eigen

#endif // EIGEN_DENSEBASE_H
27,420
43.805556
160
h