Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
abess | abess-master/python/include/unsupported/test/special_functions.cpp | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include "../Eigen/SpecialFunctions"
template<typename X, typename Y>
void verify_component_wise(const X& x, const Y& y)
{
for(Index i=0; i<x.size(); ++i)
{
if((numext::isfinite)(y(i)))
VERIFY_IS_APPROX( x(i), y(i) );
else if((numext::isnan)(y(i)))
VERIFY((numext::isnan)(x(i)));
else
VERIFY_IS_EQUAL( x(i), y(i) );
}
}
// Exercises Eigen's special functions (lgamma, digamma, erf, erfc, igamma,
// igammac, zeta, polygamma and betainc) for the given array type.  Results
// are checked both against mathematical identities and against reference
// values generated with numpy/scipy.
template<typename ArrayType> void array_special_functions()
{
using std::abs;
using std::sqrt;
typedef typename ArrayType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
// Special values used to express expected outcomes in the tables below.
Scalar plusinf = std::numeric_limits<Scalar>::infinity();
Scalar nan = std::numeric_limits<Scalar>::quiet_NaN();
// Random number of rows; a single column keeps the arrays vector-shaped.
Index rows = internal::random<Index>(1,30);
Index cols = 1;
// API
{
ArrayType m1 = ArrayType::Random(rows,cols);
#if EIGEN_HAS_C99_MATH
// Member call and free function must agree for each special function.
VERIFY_IS_APPROX(m1.lgamma(), lgamma(m1));
VERIFY_IS_APPROX(m1.digamma(), digamma(m1));
VERIFY_IS_APPROX(m1.erf(), erf(m1));
VERIFY_IS_APPROX(m1.erfc(), erfc(m1));
#endif // EIGEN_HAS_C99_MATH
}
#if EIGEN_HAS_C99_MATH
// check special functions (comparing against numpy implementation)
if (!NumTraits<Scalar>::IsComplex)
{
{
ArrayType m1 = ArrayType::Random(rows,cols);
ArrayType m2 = ArrayType::Random(rows,cols);
// Test various properties of igamma & igammac. These are normalized
// gamma integrals where
// igammac(a, x) = Gamma(a, x) / Gamma(a)
// igamma(a, x) = gamma(a, x) / Gamma(a)
// where Gamma and gamma are considered the standard unnormalized
// upper and lower incomplete gamma functions, respectively.
ArrayType a = m1.abs() + 2;
ArrayType x = m2.abs() + 2;
ArrayType zero = ArrayType::Zero(rows, cols);
ArrayType one = ArrayType::Constant(rows, cols, Scalar(1.0));
ArrayType a_m1 = a - one;
// Recover the unnormalized incomplete gamma functions by multiplying
// the normalized variants with Gamma(a) = exp(lgamma(a)).
ArrayType Gamma_a_x = Eigen::igammac(a, x) * a.lgamma().exp();
ArrayType Gamma_a_m1_x = Eigen::igammac(a_m1, x) * a_m1.lgamma().exp();
ArrayType gamma_a_x = Eigen::igamma(a, x) * a.lgamma().exp();
ArrayType gamma_a_m1_x = Eigen::igamma(a_m1, x) * a_m1.lgamma().exp();
// Gamma(a, 0) == Gamma(a)
VERIFY_IS_APPROX(Eigen::igammac(a, zero), one);
// Gamma(a, x) + gamma(a, x) == Gamma(a)
VERIFY_IS_APPROX(Gamma_a_x + gamma_a_x, a.lgamma().exp());
// Gamma(a, x) == (a - 1) * Gamma(a-1, x) + x^(a-1) * exp(-x)
VERIFY_IS_APPROX(Gamma_a_x, (a - 1) * Gamma_a_m1_x + x.pow(a-1) * (-x).exp());
// gamma(a, x) == (a - 1) * gamma(a-1, x) - x^(a-1) * exp(-x)
VERIFY_IS_APPROX(gamma_a_x, (a - 1) * gamma_a_m1_x - x.pow(a-1) * (-x).exp());
}
{
// Check exact values of igamma and igammac against a third party calculation.
Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)};
Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)};
// location i*6+j corresponds to a_s[i], x_s[j].
Scalar igamma_s[][6] = {{0.0, nan, nan, nan, nan, nan},
{0.0, 0.6321205588285578, 0.7768698398515702,
0.9816843611112658, 9.999500016666262e-05, 1.0},
{0.0, 0.4275932955291202, 0.608374823728911,
0.9539882943107686, 7.522076445089201e-07, 1.0},
{0.0, 0.01898815687615381, 0.06564245437845008,
0.5665298796332909, 4.166333347221828e-18, 1.0},
{0.0, 0.9999780593618628, 0.9999899967080838,
0.9999996219837988, 0.9991370418689945, 1.0},
{0.0, 0.0, 0.0, 0.0, 0.0, 0.5042041932513908}};
Scalar igammac_s[][6] = {{nan, nan, nan, nan, nan, nan},
{1.0, 0.36787944117144233, 0.22313016014842982,
0.018315638888734182, 0.9999000049998333, 0.0},
{1.0, 0.5724067044708798, 0.3916251762710878,
0.04601170568923136, 0.9999992477923555, 0.0},
{1.0, 0.9810118431238462, 0.9343575456215499,
0.4334701203667089, 1.0, 0.0},
{1.0, 2.1940638138146658e-05, 1.0003291916285e-05,
3.7801620118431334e-07, 0.0008629581310054535,
0.0},
{1.0, 1.0, 1.0, 1.0, 1.0, 0.49579580674813944}};
for (int i = 0; i < 6; ++i) {
for (int j = 0; j < 6; ++j) {
// NaN entries mark invalid (a, x) combinations; the implementation
// must propagate the NaN rather than return a number.
if ((std::isnan)(igamma_s[i][j])) {
VERIFY((std::isnan)(numext::igamma(a_s[i], x_s[j])));
} else {
VERIFY_IS_APPROX(numext::igamma(a_s[i], x_s[j]), igamma_s[i][j]);
}
if ((std::isnan)(igammac_s[i][j])) {
VERIFY((std::isnan)(numext::igammac(a_s[i], x_s[j])));
} else {
VERIFY_IS_APPROX(numext::igammac(a_s[i], x_s[j]), igammac_s[i][j]);
}
}
}
}
}
#endif // EIGEN_HAS_C99_MATH
// Check the zeta function against scipy.special.zeta
{
ArrayType x(7), q(7), res(7), ref(7);
x << 1.5, 4, 10.5, 10000.5, 3, 1, 0.9;
q << 2, 1.5, 3, 1.0001, -2.5, 1.2345, 1.2345;
// x == 1 is the pole (+inf); x < 1 is expected to yield NaN here.
ref << 1.61237534869, 0.234848505667, 1.03086757337e-5, 0.367879440865, 0.054102025820864097, plusinf, nan;
// Sanity check: the reference must compare equal to itself first.
CALL_SUBTEST( verify_component_wise(ref, ref); );
CALL_SUBTEST( res = x.zeta(q); verify_component_wise(res, ref); );
CALL_SUBTEST( res = zeta(x,q); verify_component_wise(res, ref); );
}
// digamma
{
ArrayType x(7), res(7), ref(7);
// Non-positive integers (0, -1) are poles, expected to return +inf.
x << 1, 1.5, 4, -10.5, 10000.5, 0, -1;
ref << -0.5772156649015329, 0.03648997397857645, 1.2561176684318, 2.398239129535781, 9.210340372392849, plusinf, plusinf;
CALL_SUBTEST( verify_component_wise(ref, ref); );
CALL_SUBTEST( res = x.digamma(); verify_component_wise(res, ref); );
CALL_SUBTEST( res = digamma(x); verify_component_wise(res, ref); );
}
#if EIGEN_HAS_C99_MATH
// polygamma: psi_n(x), checked against scipy.special.polygamma.
{
ArrayType n(11), x(11), res(11), ref(11);
// Non-integral order (1.5) is expected to produce NaN.
n << 1, 1, 1, 1.5, 17, 31, 28, 8, 42, 147, 170;
x << 2, 3, 25.5, 1.5, 4.7, 11.8, 17.7, 30.2, 15.8, 54.1, 64;
ref << 0.644934066848, 0.394934066848, 0.0399946696496, nan, 293.334565435, 0.445487887616, -2.47810300902e-07, -8.29668781082e-09, -0.434562276666, 0.567742190178, -0.0108615497927;
CALL_SUBTEST( verify_component_wise(ref, ref); );
if(sizeof(RealScalar)>=8) { // double
// Reason for commented line: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1232
// CALL_SUBTEST( res = x.polygamma(n); verify_component_wise(res, ref); );
CALL_SUBTEST( res = polygamma(n,x); verify_component_wise(res, ref); );
}
else {
// In single precision only the first 8 entries are expected to match.
// CALL_SUBTEST( res = x.polygamma(n); verify_component_wise(res.head(8), ref.head(8)); );
CALL_SUBTEST( res = polygamma(n,x); verify_component_wise(res.head(8), ref.head(8)); );
}
}
#endif
#if EIGEN_HAS_C99_MATH
{
// Inputs and ground truth generated with scipy via:
// a = np.logspace(-3, 3, 5) - 1e-3
// b = np.logspace(-3, 3, 5) - 1e-3
// x = np.linspace(-0.1, 1.1, 5)
// (full_a, full_b, full_x) = np.vectorize(lambda a, b, x: (a, b, x))(*np.ix_(a, b, x))
// full_a = full_a.flatten().tolist() # same for full_b, full_x
// v = scipy.special.betainc(full_a, full_b, full_x).flatten().tolist()
//
// Note in Eigen, we call betainc with arguments in the order (x, a, b).
ArrayType a(125);
ArrayType b(125);
ArrayType x(125);
ArrayType v(125);
ArrayType res(125);
a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999,
0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999,
0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999,
999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999,
999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999,
999.999, 999.999, 999.999;
b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999,
0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999,
999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999,
0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999,
999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999,
999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999,
999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379,
0.03062277660168379, 0.03062277660168379, 0.03062277660168379,
0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999,
31.62177660168379, 31.62177660168379, 31.62177660168379,
31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999,
999.999, 999.999;
x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5,
0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2,
0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1,
0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1,
-0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8,
1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5,
0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2,
0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1,
0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5,
0.8, 1.1;
// NaN entries correspond to out-of-domain inputs (x outside [0, 1] or a/b == 0).
v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan,
0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan,
0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan,
0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan,
nan, nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256,
0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001,
0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403,
0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999,
0.9999999999999999, nan, nan, nan, nan, nan, nan, nan,
1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06,
nan, nan, 7.864342668429763e-23, 3.015969667594166e-10,
0.0008598571564165444, nan, nan, 6.031987710123844e-08,
0.5000000000000007, 0.9999999396801229, nan, nan, 0.9999999999999999,
0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan,
nan, 0.0, 7.029920380986636e-306, 2.2450728208591345e-101, nan, nan,
0.0, 9.275871147869727e-302, 1.2232913026152827e-97, nan, nan, 0.0,
3.0891393081932924e-252, 2.9303043666183996e-60, nan, nan,
2.248913486879199e-196, 0.5000000000004947, 0.9999999999999999, nan;
CALL_SUBTEST(res = betainc(a, b, x);
verify_component_wise(res, v););
}
// Test various properties of betainc
{
ArrayType m1 = ArrayType::Random(32);
ArrayType m2 = ArrayType::Random(32);
ArrayType m3 = ArrayType::Random(32);
ArrayType one = ArrayType::Constant(32, Scalar(1.0));
const Scalar eps = std::numeric_limits<Scalar>::epsilon();
// Positive shape parameters a, b and sample locations x in [0, 1).
ArrayType a = (m1 * 4.0).exp();
ArrayType b = (m2 * 4.0).exp();
ArrayType x = m3.abs();
// betainc(a, 1, x) == x**a
CALL_SUBTEST(
ArrayType test = betainc(a, one, x);
ArrayType expected = x.pow(a);
verify_component_wise(test, expected););
// betainc(1, b, x) == 1 - (1 - x)**b
CALL_SUBTEST(
ArrayType test = betainc(one, b, x);
ArrayType expected = one - (one - x).pow(b);
verify_component_wise(test, expected););
// betainc(a, b, x) == 1 - betainc(b, a, 1-x)
CALL_SUBTEST(
ArrayType test = betainc(a, b, x) + betainc(b, a, one - x);
ArrayType expected = one;
verify_component_wise(test, expected););
// betainc(a+1, b, x) = betainc(a, b, x) - x**a * (1 - x)**b / (a * beta(a, b))
CALL_SUBTEST(
ArrayType num = x.pow(a) * (one - x).pow(b);
ArrayType denom = a * (a.lgamma() + b.lgamma() - (a + b).lgamma()).exp();
// Add eps to rhs and lhs so that component-wise test doesn't result in
// nans when both outputs are zeros.
ArrayType expected = betainc(a, b, x) - num / denom + eps;
ArrayType test = betainc(a + one, b, x) + eps;
if (sizeof(Scalar) >= 8) { // double
verify_component_wise(test, expected);
} else {
// Reason for limited test: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1232
verify_component_wise(test.head(8), expected.head(8));
});
// betainc(a, b+1, x) = betainc(a, b, x) + x**a * (1 - x)**b / (b * beta(a, b))
CALL_SUBTEST(
// Add eps to rhs and lhs so that component-wise test doesn't result in
// nans when both outputs are zeros.
ArrayType num = x.pow(a) * (one - x).pow(b);
ArrayType denom = b * (a.lgamma() + b.lgamma() - (a + b).lgamma()).exp();
ArrayType expected = betainc(a, b, x) + num / denom + eps;
ArrayType test = betainc(a, b + one, x) + eps;
verify_component_wise(test, expected););
}
#endif
}
// Entry point invoked by Eigen's test driver: runs the special-function
// checks for single-precision and double-precision dynamic arrays.
void test_special_functions()
{
CALL_SUBTEST_1(array_special_functions<ArrayXf>());
CALL_SUBTEST_2(array_special_functions<ArrayXd>());
}
| 16,332 | 46.205202 | 186 | cpp |
abess | abess-master/python/include/unsupported/test/splines.cpp | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010-2011 Hauke Heibel <heibel@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <unsupported/Eigen/Splines>
namespace Eigen {
// lets do some explicit instantiations and thus
// force the compilation of all spline functions...
// (covers both dynamic and fixed spline degrees, for double and float)
template class Spline<double, 2, Dynamic>;
template class Spline<double, 3, Dynamic>;
template class Spline<double, 2, 2>;
template class Spline<double, 2, 3>;
template class Spline<double, 2, 4>;
template class Spline<double, 2, 5>;
template class Spline<float, 2, Dynamic>;
template class Spline<float, 3, Dynamic>;
template class Spline<float, 3, 2>;
template class Spline<float, 3, 3>;
template class Spline<float, 3, 4>;
template class Spline<float, 3, 5>;
}
/* Builds a closed 2d reference spline: the first and last control points
   coincide, so the evaluated curve returns to its starting point. */
Spline<double, 2, Dynamic> closed_spline2d()
{
RowVectorXd knots(12);
knots << 0,
0,
0,
0,
0.867193179093898,
1.660330955342408,
2.605084834823134,
3.484154586374428,
4.252699478956276,
4.252699478956276,
4.252699478956276,
4.252699478956276;
MatrixXd ctrls(8,2);
ctrls << -0.370967741935484, 0.236842105263158,
-0.231401860693277, 0.442245185027632,
0.344361228532831, 0.773369994120753,
0.828990216203802, 0.106550882647595,
0.407270163678382, -1.043452922172848,
-0.488467813584053, -0.390098582530090,
-0.494657189446427, 0.054804824897884,
-0.370967741935484, 0.236842105263158;
ctrls.transposeInPlace(); // Spline expects control points as columns.
return Spline<double, 2, Dynamic>(knots, ctrls);
}
/* Creates a 3d reference spline whose evaluations are compared against
   precomputed values in the tests below.  The boundary knots (0 and 1)
   appear three times each. */
Spline<double, 3, Dynamic> spline3d()
{
RowVectorXd knots(11);
knots << 0,
0,
0,
0.118997681558377,
0.162611735194631,
0.498364051982143,
0.655098003973841,
0.679702676853675,
1.000000000000000,
1.000000000000000,
1.000000000000000;
MatrixXd ctrls(8,3);
ctrls << 0.959743958516081, 0.340385726666133, 0.585267750979777,
0.223811939491137, 0.751267059305653, 0.255095115459269,
0.505957051665142, 0.699076722656686, 0.890903252535799,
0.959291425205444, 0.547215529963803, 0.138624442828679,
0.149294005559057, 0.257508254123736, 0.840717255983663,
0.254282178971531, 0.814284826068816, 0.243524968724989,
0.929263623187228, 0.349983765984809, 0.196595250431208,
0.251083857976031, 0.616044676146639, 0.473288848902729;
ctrls.transposeInPlace(); // Spline expects control points as columns.
return Spline<double, 3, Dynamic>(knots, ctrls);
}
/* Compares spline evaluations at interior parameter values against known
   precomputed results. */
void eval_spline3d()
{
Spline3d spline = spline3d();
// Parameter values at which the spline is sampled.
RowVectorXd u(10);
u << 0.351659507062997,
0.830828627896291,
0.585264091152724,
0.549723608291140,
0.917193663829810,
0.285839018820374,
0.757200229110721,
0.753729094278495,
0.380445846975357,
0.567821640725221;
// Expected curve points, one row per entry of u.
MatrixXd pts(10,3);
pts << 0.707620811535916, 0.510258911240815, 0.417485437023409,
0.603422256426978, 0.529498282727551, 0.270351549348981,
0.228364197569334, 0.423745615677815, 0.637687289287490,
0.275556796335168, 0.350856706427970, 0.684295784598905,
0.514519311047655, 0.525077224890754, 0.351628308305896,
0.724152914315666, 0.574461155457304, 0.469860285484058,
0.529365063753288, 0.613328702656816, 0.237837040141739,
0.522469395136878, 0.619099658652895, 0.237139665242069,
0.677357023849552, 0.480655768435853, 0.422227610314397,
0.247046593173758, 0.380604672404750, 0.670065791405019;
pts.transposeInPlace();
// Each evaluated point must match its reference within 1e-14.
for (int i=0; i<u.size(); ++i)
{
Vector3d pt = spline(u(i));
VERIFY( (pt - pts.col(i)).norm() < 1e-14 );
}
}
/* Corner-case check: evaluates the spline directly at its knot values.
   The repeated rows in the reference correspond to the repeated boundary
   knots of the spline. */
void eval_spline3d_onbrks()
{
Spline3d spline = spline3d();
RowVectorXd u = spline.knots();
MatrixXd pts(11,3);
pts << 0.959743958516081, 0.340385726666133, 0.585267750979777,
0.959743958516081, 0.340385726666133, 0.585267750979777,
0.959743958516081, 0.340385726666133, 0.585267750979777,
0.430282980289940, 0.713074680056118, 0.720373307943349,
0.558074875553060, 0.681617921034459, 0.804417124839942,
0.407076008291750, 0.349707710518163, 0.617275937419545,
0.240037008286602, 0.738739390398014, 0.324554153129411,
0.302434111480572, 0.781162443963899, 0.240177089094644,
0.251083857976031, 0.616044676146639, 0.473288848902729,
0.251083857976031, 0.616044676146639, 0.473288848902729,
0.251083857976031, 0.616044676146639, 0.473288848902729;
pts.transposeInPlace();
// Each evaluated point must match its reference within 1e-14.
for (int i=0; i<u.size(); ++i)
{
Vector3d pt = spline(u(i));
VERIFY( (pt - pts.col(i)).norm() < 1e-14 );
}
}
// Evaluates the closed 2d reference spline at selected parameters, from the
// start (0) to the end of the parameter range, and compares against
// precomputed points.  First and last samples coincide (closed curve).
void eval_closed_spline2d()
{
Spline2d spline = closed_spline2d();
RowVectorXd u(12);
u << 0,
0.332457030395796,
0.356467130532952,
0.453562180176215,
0.648017921874804,
0.973770235555003,
1.882577647219307,
2.289408593930498,
3.511951429883045,
3.884149321369450,
4.236261590369414,
4.252699478956276;
MatrixXd pts(12,2);
pts << -0.370967741935484, 0.236842105263158,
-0.152576775123250, 0.448975001279334,
-0.133417538277668, 0.461615613865667,
-0.053199060826740, 0.507630360006299,
0.114249591147281, 0.570414135097409,
0.377810316891987, 0.560497102875315,
0.665052120135908, -0.157557441109611,
0.516006487053228, -0.559763292174825,
-0.379486035348887, -0.331959640488223,
-0.462034726249078, -0.039105670080824,
-0.378730600917982, 0.225127015099919,
-0.370967741935484, 0.236842105263158;
pts.transposeInPlace();
// Each evaluated point must match its reference within 1e-14.
for (int i=0; i<u.size(); ++i)
{
Vector2d pt = spline(u(i));
VERIFY( (pt - pts.col(i)).norm() < 1e-14 );
}
}
void check_global_interpolation2d()
{
typedef Spline2d::PointType PointType;
typedef Spline2d::KnotVectorType KnotVectorType;
typedef Spline2d::ControlPointVectorType ControlPointVectorType;
ControlPointVectorType points = ControlPointVectorType::Random(2,100);
KnotVectorType chord_lengths; // knot parameters
Eigen::ChordLengths(points, chord_lengths);
// interpolation without knot parameters
{
const Spline2d spline = SplineFitting<Spline2d>::Interpolate(points,3);
for (Eigen::DenseIndex i=0; i<points.cols(); ++i)
{
PointType pt = spline( chord_lengths(i) );
PointType ref = points.col(i);
VERIFY( (pt - ref).matrix().norm() < 1e-14 );
}
}
// interpolation with given knot parameters
{
const Spline2d spline = SplineFitting<Spline2d>::Interpolate(points,3,chord_lengths);
for (Eigen::DenseIndex i=0; i<points.cols(); ++i)
{
PointType pt = spline( chord_lengths(i) );
PointType ref = points.col(i);
VERIFY( (pt - ref).matrix().norm() < 1e-14 );
}
}
}
void check_global_interpolation_with_derivatives2d()
{
typedef Spline2d::PointType PointType;
typedef Spline2d::KnotVectorType KnotVectorType;
const Eigen::DenseIndex numPoints = 100;
const unsigned int dimension = 2;
const unsigned int degree = 3;
ArrayXXd points = ArrayXXd::Random(dimension, numPoints);
KnotVectorType knots;
Eigen::ChordLengths(points, knots);
ArrayXXd derivatives = ArrayXXd::Random(dimension, numPoints);
VectorXd derivativeIndices(numPoints);
for (Eigen::DenseIndex i = 0; i < numPoints; ++i)
derivativeIndices(i) = static_cast<double>(i);
const Spline2d spline = SplineFitting<Spline2d>::InterpolateWithDerivatives(
points, derivatives, derivativeIndices, degree);
for (Eigen::DenseIndex i = 0; i < points.cols(); ++i)
{
PointType point = spline(knots(i));
PointType referencePoint = points.col(i);
VERIFY_IS_APPROX(point, referencePoint);
PointType derivative = spline.derivatives(knots(i), 1).col(1);
PointType referenceDerivative = derivatives.col(i);
VERIFY_IS_APPROX(derivative, referenceDerivative);
}
}
// Entry point invoked by Eigen's test driver: repeats every spline check
// g_repeat times.
void test_splines()
{
for (int i = 0; i < g_repeat; ++i)
{
CALL_SUBTEST( eval_spline3d() );
CALL_SUBTEST( eval_spline3d_onbrks() );
CALL_SUBTEST( eval_closed_spline2d() );
CALL_SUBTEST( check_global_interpolation2d() );
CALL_SUBTEST( check_global_interpolation_with_derivatives2d() );
}
}
| 8,521 | 29.219858 | 91 | cpp |
abess | abess-master/python/include/unsupported/test/mpreal/mpreal.h | /*
MPFR C++: Multi-precision floating point number class for C++.
Based on MPFR library: http://mpfr.org
Project homepage: http://www.holoborodko.com/pavel/mpfr
Contact e-mail: pavel@holoborodko.com
Copyright (c) 2008-2015 Pavel Holoborodko
Contributors:
Dmitriy Gubanov, Konstantin Holoborodko, Brian Gladman,
Helmut Jarausch, Fokko Beekhof, Ulrich Mutze, Heinz van Saanen,
Pere Constans, Peter van Hoof, Gael Guennebaud, Tsai Chia Cheng,
Alexei Zubanov, Jauhien Piatlicki, Victor Berger, John Westwood,
Petr Aleksandrov, Orion Poplawski, Charles Karney, Arash Partow,
Rodney James, Jorge Leitao.
Licensing:
(A) MPFR C++ is under GNU General Public License ("GPL").
(B) Non-free licenses may also be purchased from the author, for users who
do not want their programs protected by the GPL.
The non-free licenses are for users that wish to use MPFR C++ in
their products but are unwilling to release their software
under the GPL (which would require them to release source code
and allow free redistribution).
Such users can purchase an unlimited-use license from the author.
Contact us for more details.
GNU General Public License ("GPL") copyright permissions statement:
**************************************************************************
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MPREAL_H__
#define __MPREAL_H__
#include <string>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <cfloat>
#include <cmath>
#include <cstring>
#include <limits>
#include <complex>
#include <algorithm>
// Options
#define MPREAL_HAVE_MSVC_DEBUGVIEW // Enable Debugger Visualizer for "Debug" builds in MSVC.
#define MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS // Enable extended std::numeric_limits<mpfr::mpreal> specialization.
// Meaning that "digits", "round_style" and similar members are defined as functions, not constants.
// See std::numeric_limits<mpfr::mpreal> at the end of the file for more information.
// Library version
#define MPREAL_VERSION_MAJOR 3
#define MPREAL_VERSION_MINOR 6
#define MPREAL_VERSION_PATCHLEVEL 2
#define MPREAL_VERSION_STRING "3.6.2"
// Detect compiler using signatures from http://predef.sourceforge.net/
#if defined(__GNUC__)
#define IsInf(x) (isinf)(x) // GNU C++/Intel ICC compiler on Linux
#elif defined(_MSC_VER) // Microsoft Visual C++
#define IsInf(x) (!_finite(x))
#else
#define IsInf(x) (std::isinf)(x) // GNU C/C++ (and/or other compilers), just hope for C99 conformance
#endif
// A Clang feature extension to determine compiler features.
#ifndef __has_feature
#define __has_feature(x) 0
#endif
// Detect support for r-value references (move semantic). Borrowed from Eigen.
#if (__has_feature(cxx_rvalue_references) || \
defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \
(defined(_MSC_VER) && _MSC_VER >= 1600))
#define MPREAL_HAVE_MOVE_SUPPORT
// Use fields in mpfr_t structure to check if it was initialized / set dummy initialization
#define mpfr_is_initialized(x) (0 != (x)->_mpfr_d)
#define mpfr_set_uninitialized(x) ((x)->_mpfr_d = 0 )
#endif
// Detect support for explicit converters.
#if (__has_feature(cxx_explicit_conversions) || \
(defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC_MINOR__ >= 5) || __cplusplus >= 201103L || \
(defined(_MSC_VER) && _MSC_VER >= 1800))
#define MPREAL_HAVE_EXPLICIT_CONVERTERS
#endif
#define MPFR_USE_INTMAX_T // Enable 64-bit integer types - should be defined before mpfr.h
#if defined(MPREAL_HAVE_MSVC_DEBUGVIEW) && defined(_MSC_VER) && defined(_DEBUG)
#define MPREAL_MSVC_DEBUGVIEW_CODE DebugView = toString();
#define MPREAL_MSVC_DEBUGVIEW_DATA std::string DebugView;
#else
#define MPREAL_MSVC_DEBUGVIEW_CODE
#define MPREAL_MSVC_DEBUGVIEW_DATA
#endif
#include <mpfr.h>
#if (MPFR_VERSION < MPFR_VERSION_NUM(3,0,0))
#include <cstdlib> // Needed for random()
#endif
// Less important options
#define MPREAL_DOUBLE_BITS_OVERFLOW -1 // Triggers overflow exception during conversion to double if mpreal
// cannot fit in MPREAL_DOUBLE_BITS_OVERFLOW bits
// = -1 disables overflow checks (default)
// Fast replacement for mpfr_set_zero(x, +1):
// (a) uses low-level data members, might not be compatible with new versions of MPFR
// (b) sign is not set, add (x)->_mpfr_sign = 1;
#define mpfr_set_zero_fast(x) ((x)->_mpfr_exp = __MPFR_EXP_ZERO)
#if defined(__GNUC__)
#define MPREAL_PERMISSIVE_EXPR __extension__
#else
#define MPREAL_PERMISSIVE_EXPR
#endif
namespace mpfr {
class mpreal {
private:
mpfr_t mp;
public:
// Get default rounding mode & precision
inline static mp_rnd_t get_default_rnd() { return (mp_rnd_t)(mpfr_get_default_rounding_mode()); }
inline static mp_prec_t get_default_prec() { return mpfr_get_default_prec(); }
// Constructors && type conversions
mpreal();
mpreal(const mpreal& u);
mpreal(const mpf_t u);
mpreal(const mpz_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const mpq_t u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const double u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const long double u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const unsigned long long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const long long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const unsigned long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const unsigned int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const long int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const int u, mp_prec_t prec = mpreal::get_default_prec(), mp_rnd_t mode = mpreal::get_default_rnd());
// Construct mpreal from mpfr_t structure.
// shared = true allows to avoid deep copy, so that mpreal and 'u' share the same data & pointers.
mpreal(const mpfr_t u, bool shared = false);
mpreal(const char* s, mp_prec_t prec = mpreal::get_default_prec(), int base = 10, mp_rnd_t mode = mpreal::get_default_rnd());
mpreal(const std::string& s, mp_prec_t prec = mpreal::get_default_prec(), int base = 10, mp_rnd_t mode = mpreal::get_default_rnd());
~mpreal();
#ifdef MPREAL_HAVE_MOVE_SUPPORT
mpreal& operator=(mpreal&& v);
mpreal(mpreal&& u);
#endif
// Operations
// =
// +, -, *, /, ++, --, <<, >>
// *=, +=, -=, /=,
// <, >, ==, <=, >=
// =
mpreal& operator=(const mpreal& v);
mpreal& operator=(const mpf_t v);
mpreal& operator=(const mpz_t v);
mpreal& operator=(const mpq_t v);
mpreal& operator=(const long double v);
mpreal& operator=(const double v);
mpreal& operator=(const unsigned long int v);
mpreal& operator=(const unsigned long long int v);
mpreal& operator=(const long long int v);
mpreal& operator=(const unsigned int v);
mpreal& operator=(const long int v);
mpreal& operator=(const int v);
mpreal& operator=(const char* s);
mpreal& operator=(const std::string& s);
template <typename real_t> mpreal& operator= (const std::complex<real_t>& z);
// +
mpreal& operator+=(const mpreal& v);
mpreal& operator+=(const mpf_t v);
mpreal& operator+=(const mpz_t v);
mpreal& operator+=(const mpq_t v);
mpreal& operator+=(const long double u);
mpreal& operator+=(const double u);
mpreal& operator+=(const unsigned long int u);
mpreal& operator+=(const unsigned int u);
mpreal& operator+=(const long int u);
mpreal& operator+=(const int u);
mpreal& operator+=(const long long int u);
mpreal& operator+=(const unsigned long long int u);
mpreal& operator-=(const long long int u);
mpreal& operator-=(const unsigned long long int u);
mpreal& operator*=(const long long int u);
mpreal& operator*=(const unsigned long long int u);
mpreal& operator/=(const long long int u);
mpreal& operator/=(const unsigned long long int u);
const mpreal operator+() const;
mpreal& operator++ ();
const mpreal operator++ (int);
// -
mpreal& operator-=(const mpreal& v);
mpreal& operator-=(const mpz_t v);
mpreal& operator-=(const mpq_t v);
mpreal& operator-=(const long double u);
mpreal& operator-=(const double u);
mpreal& operator-=(const unsigned long int u);
mpreal& operator-=(const unsigned int u);
mpreal& operator-=(const long int u);
mpreal& operator-=(const int u);
const mpreal operator-() const;
friend const mpreal operator-(const unsigned long int b, const mpreal& a);
friend const mpreal operator-(const unsigned int b, const mpreal& a);
friend const mpreal operator-(const long int b, const mpreal& a);
friend const mpreal operator-(const int b, const mpreal& a);
friend const mpreal operator-(const double b, const mpreal& a);
mpreal& operator-- ();
const mpreal operator-- (int);
// *
mpreal& operator*=(const mpreal& v);
mpreal& operator*=(const mpz_t v);
mpreal& operator*=(const mpq_t v);
mpreal& operator*=(const long double v);
mpreal& operator*=(const double v);
mpreal& operator*=(const unsigned long int v);
mpreal& operator*=(const unsigned int v);
mpreal& operator*=(const long int v);
mpreal& operator*=(const int v);
// /
mpreal& operator/=(const mpreal& v);
mpreal& operator/=(const mpz_t v);
mpreal& operator/=(const mpq_t v);
mpreal& operator/=(const long double v);
mpreal& operator/=(const double v);
mpreal& operator/=(const unsigned long int v);
mpreal& operator/=(const unsigned int v);
mpreal& operator/=(const long int v);
mpreal& operator/=(const int v);
friend const mpreal operator/(const unsigned long int b, const mpreal& a);
friend const mpreal operator/(const unsigned int b, const mpreal& a);
friend const mpreal operator/(const long int b, const mpreal& a);
friend const mpreal operator/(const int b, const mpreal& a);
friend const mpreal operator/(const double b, const mpreal& a);
//<<= Fast Multiplication by 2^u
mpreal& operator<<=(const unsigned long int u);
mpreal& operator<<=(const unsigned int u);
mpreal& operator<<=(const long int u);
mpreal& operator<<=(const int u);
//>>= Fast Division by 2^u
mpreal& operator>>=(const unsigned long int u);
mpreal& operator>>=(const unsigned int u);
mpreal& operator>>=(const long int u);
mpreal& operator>>=(const int u);
// Type Conversion operators
bool toBool ( ) const;
long toLong (mp_rnd_t mode = GMP_RNDZ) const;
unsigned long toULong (mp_rnd_t mode = GMP_RNDZ) const;
long long toLLong (mp_rnd_t mode = GMP_RNDZ) const;
unsigned long long toULLong (mp_rnd_t mode = GMP_RNDZ) const;
float toFloat (mp_rnd_t mode = GMP_RNDN) const;
double toDouble (mp_rnd_t mode = GMP_RNDN) const;
long double toLDouble (mp_rnd_t mode = GMP_RNDN) const;
#if defined (MPREAL_HAVE_EXPLICIT_CONVERTERS)
explicit operator bool () const { return toBool(); }
explicit operator int () const { return int(toLong()); }
explicit operator long () const { return toLong(); }
explicit operator long long () const { return toLLong(); }
explicit operator unsigned () const { return unsigned(toULong()); }
explicit operator unsigned long () const { return toULong(); }
explicit operator unsigned long long () const { return toULLong(); }
explicit operator float () const { return toFloat(); }
explicit operator double () const { return toDouble(); }
explicit operator long double () const { return toLDouble(); }
#endif
// Get raw pointers so that mpreal can be directly used in raw mpfr_* functions
::mpfr_ptr mpfr_ptr();
::mpfr_srcptr mpfr_ptr() const;
::mpfr_srcptr mpfr_srcptr() const;
// Convert mpreal to string with n significant digits in base b
// n = -1 -> convert with the maximum available digits
std::string toString(int n = -1, int b = 10, mp_rnd_t mode = mpreal::get_default_rnd()) const;
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
std::string toString(const std::string& format) const;
#endif
std::ostream& output(std::ostream& os) const;
// Math Functions
friend const mpreal sqr (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal sqrt(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal sqrt(const unsigned long int v, mp_rnd_t rnd_mode);
friend const mpreal cbrt(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal root(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode);
friend const mpreal pow (const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode);
friend const mpreal pow (const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode);
friend const mpreal pow (const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode);
friend const mpreal pow (const mpreal& a, const long int b, mp_rnd_t rnd_mode);
friend const mpreal pow (const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode);
friend const mpreal pow (const unsigned long int a, const unsigned long int b, mp_rnd_t rnd_mode);
friend const mpreal fabs(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal abs(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode);
friend inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode);
friend inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode);
friend inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode);
friend inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode);
friend int cmpabs(const mpreal& a,const mpreal& b);
friend const mpreal log (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal log2 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal logb (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal log10(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal exp (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal exp2 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal exp10(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal log1p(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal expm1(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal cos(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal sin(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal tan(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal sec(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal csc(const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal cot(const mpreal& v, mp_rnd_t rnd_mode);
friend int sin_cos(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal acos (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal asin (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal atan (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode);
friend const mpreal acot (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal asec (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal acsc (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal cosh (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal sinh (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal tanh (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal sech (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal csch (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal coth (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal acosh (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal asinh (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal atanh (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal acoth (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal asech (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal acsch (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
friend const mpreal fac_ui (unsigned long int v, mp_prec_t prec, mp_rnd_t rnd_mode);
friend const mpreal eint (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal gamma (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal tgamma (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal lngamma (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal lgamma (const mpreal& v, int *signp, mp_rnd_t rnd_mode);
friend const mpreal zeta (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal erf (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal erfc (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal besselj0 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal besselj1 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal besseljn (long n, const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal bessely0 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal bessely1 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal besselyn (long n, const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode);
friend const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode);
friend const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode);
friend const mpreal sum (const mpreal tab[], const unsigned long int n, int& status, mp_rnd_t rnd_mode);
friend int sgn(const mpreal& v); // returns -1 or +1
// MPFR 2.4.0 Specifics
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
friend int sinh_cosh (mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal li2 (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
friend const mpreal rec_sqrt (const mpreal& v, mp_rnd_t rnd_mode);
// MATLAB's semantic equivalents
friend const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); // Remainder after division
friend const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode); // Modulus after division
#endif
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
friend const mpreal digamma (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal ai (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode); // use gmp_randinit_default() to init state, gmp_randclear() to clear
#endif
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
friend const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode); // use gmp_randinit_default() to init state, gmp_randclear() to clear
friend const mpreal grandom (unsigned int seed);
#endif
// Uniformly distributed random number generation in [0,1] using
// Mersenne-Twister algorithm by default.
// Use parameter to setup seed, e.g.: random((unsigned)time(NULL))
// Check urandom() for more precise control.
friend const mpreal random(unsigned int seed);
// Splits mpreal value into fractional and integer parts.
// Returns fractional part and stores integer part in n.
friend const mpreal modf(const mpreal& v, mpreal& n);
// Constants
// don't forget to call mpfr_free_cache() for every thread where you are using const-functions
friend const mpreal const_log2 (mp_prec_t prec, mp_rnd_t rnd_mode);
friend const mpreal const_pi (mp_prec_t prec, mp_rnd_t rnd_mode);
friend const mpreal const_euler (mp_prec_t prec, mp_rnd_t rnd_mode);
friend const mpreal const_catalan (mp_prec_t prec, mp_rnd_t rnd_mode);
// returns +inf iff sign>=0 otherwise -inf
friend const mpreal const_infinity(int sign, mp_prec_t prec);
// Output/ Input
friend std::ostream& operator<<(std::ostream& os, const mpreal& v);
friend std::istream& operator>>(std::istream& is, mpreal& v);
// Integer Related Functions
friend const mpreal rint (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal ceil (const mpreal& v);
friend const mpreal floor(const mpreal& v);
friend const mpreal round(const mpreal& v);
friend const mpreal trunc(const mpreal& v);
friend const mpreal rint_ceil (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal rint_floor (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal rint_round (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal rint_trunc (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal frac (const mpreal& v, mp_rnd_t rnd_mode);
friend const mpreal remainder ( const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
friend const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
// Miscellaneous Functions
friend const mpreal nexttoward (const mpreal& x, const mpreal& y);
friend const mpreal nextabove (const mpreal& x);
friend const mpreal nextbelow (const mpreal& x);
// use gmp_randinit_default() to init state, gmp_randclear() to clear
friend const mpreal urandomb (gmp_randstate_t& state);
// MPFR < 2.4.2 Specifics
#if (MPFR_VERSION <= MPFR_VERSION_NUM(2,4,2))
friend const mpreal random2 (mp_size_t size, mp_exp_t exp);
#endif
// Instance Checkers
friend bool (isnan) (const mpreal& v);
friend bool (isinf) (const mpreal& v);
friend bool (isfinite) (const mpreal& v);
friend bool isnum (const mpreal& v);
friend bool iszero (const mpreal& v);
friend bool isint (const mpreal& v);
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
friend bool isregular(const mpreal& v);
#endif
// Set/Get instance properties
inline mp_prec_t get_prec() const;
inline void set_prec(mp_prec_t prec, mp_rnd_t rnd_mode = get_default_rnd()); // Change precision with rounding mode
// Aliases for get_prec(), set_prec() - needed for compatibility with std::complex<mpreal> interface
inline mpreal& setPrecision(int Precision, mp_rnd_t RoundingMode = get_default_rnd());
inline int getPrecision() const;
// Set mpreal to +/- inf, NaN, +/-0
mpreal& setInf (int Sign = +1);
mpreal& setNan ();
mpreal& setZero (int Sign = +1);
mpreal& setSign (int Sign, mp_rnd_t RoundingMode = get_default_rnd());
//Exponent
mp_exp_t get_exp();
int set_exp(mp_exp_t e);
int check_range (int t, mp_rnd_t rnd_mode = get_default_rnd());
int subnormalize (int t, mp_rnd_t rnd_mode = get_default_rnd());
// Inexact conversion from float
inline bool fits_in_bits(double x, int n);
// Set/Get global properties
static void set_default_prec(mp_prec_t prec);
static void set_default_rnd(mp_rnd_t rnd_mode);
static mp_exp_t get_emin (void);
static mp_exp_t get_emax (void);
static mp_exp_t get_emin_min (void);
static mp_exp_t get_emin_max (void);
static mp_exp_t get_emax_min (void);
static mp_exp_t get_emax_max (void);
static int set_emin (mp_exp_t exp);
static int set_emax (mp_exp_t exp);
// Efficient swapping of two mpreal values - needed for std algorithms
friend void swap(mpreal& x, mpreal& y);
friend const mpreal fmax(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
friend const mpreal fmin(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode);
private:
// Human friendly Debug Preview in Visual Studio.
// Put one of these lines:
//
// mpfr::mpreal=<DebugView> ; Show value only
// mpfr::mpreal=<DebugView>, <mp[0]._mpfr_prec,u>bits ; Show value & precision
//
// at the beginning of
// [Visual Studio Installation Folder]\Common7\Packages\Debugger\autoexp.dat
MPREAL_MSVC_DEBUGVIEW_DATA
// "Smart" resources deallocation. Checks if instance initialized before deletion.
void clear(::mpfr_ptr);
};
//////////////////////////////////////////////////////////////////////////
// Exceptions
// Thrown by double -> mpreal conversions when MPREAL_DOUBLE_BITS_OVERFLOW is
// enabled and the value does not fit into the configured number of bits.
class conversion_overflow : public std::exception {
public:
    // Legacy accessor; kept for backward compatibility with existing callers.
    std::string why() { return "inexact conversion from floating point"; }
    // Standard std::exception interface so that generic
    // catch (const std::exception&) handlers report a meaningful message
    // instead of the implementation's default one. throw() is used rather
    // than noexcept to stay compatible with pre-C++11 compilers this header
    // otherwise supports.
    const char* what() const throw() { return "inexact conversion from floating point"; }
};
//////////////////////////////////////////////////////////////////////////
// Constructors & converters
// Default constructor: creates mp number and initializes it to 0.
// Default constructor: allocates an mpfr number at the current default
// precision and initializes it to zero (mpfr_set_zero_fast avoids the
// overhead of a rounded assignment).
inline mpreal::mpreal()
{
    mpfr_init2(mpfr_ptr(), mpreal::get_default_prec());
    mpfr_set_zero_fast(mpfr_ptr());

    MPREAL_MSVC_DEBUGVIEW_CODE;
}
// Copy constructor: the copy adopts the precision of the source, so the
// subsequent mpfr_set is exact regardless of the rounding mode passed.
inline mpreal::mpreal(const mpreal& u)
{
    mpfr_init2(mpfr_ptr(),mpfr_get_prec(u.mpfr_srcptr()));
    mpfr_set  (mpfr_ptr(),u.mpfr_srcptr(),mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
}
#ifdef MPREAL_HAVE_MOVE_SUPPORT
// Move constructor: steals the source's mpfr data via swap. The destination
// is first marked uninitialized so that after the swap "other" holds no
// pointer to live data and its destructor becomes a no-op (see clear()).
inline mpreal::mpreal(mpreal&& other)
{
    mpfr_set_uninitialized(mpfr_ptr());      // make sure "other" holds no pointer to actual data
    mpfr_swap(mpfr_ptr(), other.mpfr_ptr());

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

// Move assignment: plain swap — "other" carries away this object's old value
// and releases it when it is destroyed.
inline mpreal& mpreal::operator=(mpreal&& other)
{
    mpfr_swap(mpfr_ptr(), other.mpfr_ptr());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}
#endif
// Construct from a raw mpfr_t.
// shared == false (default): deep copy at u's precision.
// shared == true: the struct (including its limb pointer) is memcpy'd, so
// this object aliases and takes over u's storage — the destructor will
// mpfr_clear it, so the caller must NOT clear u independently.
inline mpreal::mpreal(const mpfr_t u, bool shared)
{
    if(shared)
    {
        std::memcpy(mpfr_ptr(), u, sizeof(mpfr_t));
    }
    else
    {
        mpfr_init2(mpfr_ptr(), mpfr_get_prec(u));
        mpfr_set  (mpfr_ptr(), u, mpreal::get_default_rnd());
    }

    MPREAL_MSVC_DEBUGVIEW_CODE;
}
// Construct from a GMP float: precision is inherited from u.
inline mpreal::mpreal(const mpf_t u)
{
    mpfr_init2(mpfr_ptr(),(mp_prec_t) mpf_get_prec(u)); // (gmp: mp_bitcnt_t) unsigned long -> long (mpfr: mp_prec_t)
    mpfr_set_f(mpfr_ptr(),u,mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

// Construct from a GMP integer at an explicit precision and rounding mode.
inline mpreal::mpreal(const mpz_t u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2(mpfr_ptr(), prec);
    mpfr_set_z(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

// Construct from a GMP rational at an explicit precision and rounding mode.
inline mpreal::mpreal(const mpq_t u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2(mpfr_ptr(), prec);
    mpfr_set_q(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}
// Construct from a double at an explicit precision and rounding mode.
// When MPREAL_DOUBLE_BITS_OVERFLOW is enabled, a value whose mantissa does
// not fit into that many bits raises conversion_overflow instead of being
// silently rounded.
inline mpreal::mpreal(const double u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2(mpfr_ptr(), prec);

#if (MPREAL_DOUBLE_BITS_OVERFLOW > -1)
    if(fits_in_bits(u, MPREAL_DOUBLE_BITS_OVERFLOW))
    {
        mpfr_set_d(mpfr_ptr(), u, mode);
    }
    else
    {
        // The constructor does not complete, so ~mpreal() will never run for
        // this object: release the freshly initialized mpfr_t before
        // throwing, otherwise its limb storage leaks.
        clear(mpfr_ptr());
        throw conversion_overflow();
    }
#else
    mpfr_set_d(mpfr_ptr(), u, mode);
#endif

    MPREAL_MSVC_DEBUGVIEW_CODE;
}
// Constructors from the remaining built-in numeric types. Each allocates at
// the requested precision and converts with the requested rounding mode,
// dispatching to the matching mpfr_set_* routine (ld = long double,
// uj/sj = uintmax/intmax, ui/si = unsigned/signed long).
inline mpreal::mpreal(const long double u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2 (mpfr_ptr(), prec);
    mpfr_set_ld(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

inline mpreal::mpreal(const unsigned long long int u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2 (mpfr_ptr(), prec);
    mpfr_set_uj(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

inline mpreal::mpreal(const long long int u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2 (mpfr_ptr(), prec);
    mpfr_set_sj(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

inline mpreal::mpreal(const unsigned long int u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2 (mpfr_ptr(), prec);
    mpfr_set_ui(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

inline mpreal::mpreal(const unsigned int u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2 (mpfr_ptr(), prec);
    mpfr_set_ui(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

inline mpreal::mpreal(const long int u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2 (mpfr_ptr(), prec);
    mpfr_set_si(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

inline mpreal::mpreal(const int u, mp_prec_t prec, mp_rnd_t mode)
{
    mpfr_init2 (mpfr_ptr(), prec);
    mpfr_set_si(mpfr_ptr(), u, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}
// Construct from a textual representation in the given base.
// NOTE(review): the return value of mpfr_set_str is ignored here, so a
// malformed string is not reported; per MPFR documentation the stored value
// is unspecified in that case — confirm callers validate their input.
inline mpreal::mpreal(const char* s, mp_prec_t prec, int base, mp_rnd_t mode)
{
    mpfr_init2  (mpfr_ptr(), prec);
    mpfr_set_str(mpfr_ptr(), s, base, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}

// std::string overload of the textual constructor above.
inline mpreal::mpreal(const std::string& s, mp_prec_t prec, int base, mp_rnd_t mode)
{
    mpfr_init2  (mpfr_ptr(), prec);
    mpfr_set_str(mpfr_ptr(), s.c_str(), base, mode);

    MPREAL_MSVC_DEBUGVIEW_CODE;
}
// "Smart" deallocation: with move support enabled an object may have been
// moved-from (marked uninitialized), in which case mpfr_clear must be
// skipped; without move support every mpreal is always initialized.
inline void mpreal::clear(::mpfr_ptr x)
{
#ifdef MPREAL_HAVE_MOVE_SUPPORT
    if(mpfr_is_initialized(x))
#endif
    mpfr_clear(x);
}

// Destructor: releases the underlying mpfr storage (if any).
inline mpreal::~mpreal()
{
    clear(mpfr_ptr());
}
// internal namespace needed for template magic
// internal namespace needed for template magic
namespace internal{

    // Use SFINAE to restrict arithmetic operations instantiation only for numeric types
    // This is needed for smooth integration with libraries based on expression templates, like Eigen.
    // TODO: Do the same for boolean operators.
    // Only the types specialized below participate in the mixed-type
    // operator templates; any other operand type makes
    // result_type<T>::type ill-formed and removes the overload.
    template <typename ArgumentType> struct result_type {};

    template <> struct result_type<mpreal>              {typedef mpreal type;};
    template <> struct result_type<mpz_t>               {typedef mpreal type;};
    template <> struct result_type<mpq_t>               {typedef mpreal type;};
    template <> struct result_type<long double>         {typedef mpreal type;};
    template <> struct result_type<double>              {typedef mpreal type;};
    template <> struct result_type<unsigned long int>   {typedef mpreal type;};
    template <> struct result_type<unsigned int>        {typedef mpreal type;};
    template <> struct result_type<long int>            {typedef mpreal type;};
    template <> struct result_type<int>                 {typedef mpreal type;};
    template <> struct result_type<long long>           {typedef mpreal type;};
    template <> struct result_type<unsigned long long>  {typedef mpreal type;};
}
// Mixed-type binary arithmetic. SFINAE through internal::result_type limits
// these templates to the numeric operand types listed there. Addition and
// multiplication exploit commutativity by copying whichever operand is the
// mpreal; subtraction and division must keep operand order, so they always
// convert the left-hand side first.

// + Addition
template <typename Rhs>
inline const typename internal::result_type<Rhs>::type
    operator+(const mpreal& lhs, const Rhs& rhs)
{
    mpreal sum(lhs);
    sum += rhs;
    return sum;
}

template <typename Lhs>
inline const typename internal::result_type<Lhs>::type
    operator+(const Lhs& lhs, const mpreal& rhs)
{
    mpreal sum(rhs);    // a + b == b + a
    sum += lhs;
    return sum;
}

// - Subtraction
template <typename Rhs>
inline const typename internal::result_type<Rhs>::type
    operator-(const mpreal& lhs, const Rhs& rhs)
{
    mpreal diff(lhs);
    diff -= rhs;
    return diff;
}

template <typename Lhs>
inline const typename internal::result_type<Lhs>::type
    operator-(const Lhs& lhs, const mpreal& rhs)
{
    mpreal diff(lhs);   // order matters: convert lhs, then subtract rhs
    diff -= rhs;
    return diff;
}

// * Multiplication
template <typename Rhs>
inline const typename internal::result_type<Rhs>::type
    operator*(const mpreal& lhs, const Rhs& rhs)
{
    mpreal prod(lhs);
    prod *= rhs;
    return prod;
}

template <typename Lhs>
inline const typename internal::result_type<Lhs>::type
    operator*(const Lhs& lhs, const mpreal& rhs)
{
    mpreal prod(rhs);   // a * b == b * a
    prod *= lhs;
    return prod;
}

// / Division
template <typename Rhs>
inline const typename internal::result_type<Rhs>::type
    operator/(const mpreal& lhs, const Rhs& rhs)
{
    mpreal quot(lhs);
    quot /= rhs;
    return quot;
}

template <typename Lhs>
inline const typename internal::result_type<Lhs>::type
    operator/(const Lhs& lhs, const mpreal& rhs)
{
    mpreal quot(lhs);   // order matters: convert lhs, then divide by rhs
    quot /= rhs;
    return quot;
}
//////////////////////////////////////////////////////////////////////////
// sqrt
// sqrt overloads for built-in argument types (friend overloads for mpreal
// and unsigned long are declared inside the class).
const mpreal sqrt(const unsigned int v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal sqrt(const long int v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal sqrt(const int v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal sqrt(const long double v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal sqrt(const double v, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

// abs
inline const mpreal abs(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd());
//////////////////////////////////////////////////////////////////////////
// pow
// pow overloads covering every pairing of mpreal with the built-in numeric
// types (mpreal x mpreal and a few others are friend declarations inside the
// class). All default to the global rounding mode.
const mpreal pow(const mpreal& a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const mpreal& a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const mpreal& a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const mpreal& a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

const mpreal pow(const unsigned int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const double a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

const mpreal pow(const unsigned long int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned long int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned long int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned long int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned long int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

const mpreal pow(const unsigned int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const unsigned int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

const mpreal pow(const long int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

const mpreal pow(const int a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const int a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

const mpreal pow(const long double a, const long double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const long double a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

const mpreal pow(const double a, const double b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const double a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const double a, const unsigned int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const double a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
const mpreal pow(const double a, const int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd());

// Exact scaling by powers of two (wrappers over mpfr_mul_2ui etc.).
inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode = mpreal::get_default_rnd());
//////////////////////////////////////////////////////////////////////////
// Estimate machine epsilon for the given precision
// Returns smallest eps such that 1.0 + eps != 1.0
inline mpreal machine_epsilon(mp_prec_t prec = mpreal::get_default_prec());

// Returns smallest eps such that  x + eps != x (relative machine epsilon)
inline mpreal machine_epsilon(const mpreal& x);

// Gives max & min values for the required precision,
// minval is 'safe' meaning 1 / minval does not overflow
// maxval is 'safe' meaning 1 / maxval does not underflow
inline mpreal minval(mp_prec_t prec = mpreal::get_default_prec());
inline mpreal maxval(mp_prec_t prec = mpreal::get_default_prec());

// 'Dirty' equality check 1: |a-b| < min{|a|,|b|} * eps
inline bool isEqualFuzzy(const mpreal& a, const mpreal& b, const mpreal& eps);

// 'Dirty' equality check 2: |a-b| < min{|a|,|b|} * eps( min{|a|,|b|} )
inline bool isEqualFuzzy(const mpreal& a, const mpreal& b);

// 'Bitwise' equality check
//  maxUlps - a and b can be apart by maxUlps binary numbers.
inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps);

//////////////////////////////////////////////////////////////////////////
// Convert precision in 'bits' to decimal digits and vice versa.
//    bits   = ceil(digits*log[2](10))
//    digits = floor(bits*log[10](2))
inline mp_prec_t digits2bits(int d);
inline int       bits2digits(mp_prec_t b);

//////////////////////////////////////////////////////////////////////////
// min, max
// (parenthesized to survive environments where min/max are macros)
const mpreal (max)(const mpreal& x, const mpreal& y);
const mpreal (min)(const mpreal& x, const mpreal& y);
//////////////////////////////////////////////////////////////////////////
// Implementation
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Operators - Assignment
// Copy assignment. If the precisions differ, the target's storage is
// reallocated at the source's precision before copying, so assignment is
// always exact (the mpfr_set then never rounds).
inline mpreal& mpreal::operator=(const mpreal& v)
{
    if (this != &v)
    {
        mp_prec_t tp = mpfr_get_prec(  mpfr_srcptr());
        mp_prec_t vp = mpfr_get_prec(v.mpfr_srcptr());

        if(tp != vp){
            // Re-create the underlying number at the source's precision.
            clear(mpfr_ptr());
            mpfr_init2(mpfr_ptr(), vp);
        }

        mpfr_set(mpfr_ptr(), v.mpfr_srcptr(), mpreal::get_default_rnd());

        MPREAL_MSVC_DEBUGVIEW_CODE;
    }
    return *this;
}
// Assignments from GMP types and long double. Unlike copy assignment these
// keep the target's current precision, so the conversion may round.
inline mpreal& mpreal::operator=(const mpf_t v)
{
    mpfr_set_f(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator=(const mpz_t v)
{
    mpfr_set_z(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator=(const mpq_t v)
{
    mpfr_set_q(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator=(const long double v)
{
    mpfr_set_ld(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}
// Assignment from double. With MPREAL_DOUBLE_BITS_OVERFLOW enabled, a value
// whose mantissa does not fit into that many bits throws conversion_overflow
// and leaves *this unchanged; otherwise the value is stored (rounded to the
// target's current precision).
inline mpreal& mpreal::operator=(const double v)
{
#if (MPREAL_DOUBLE_BITS_OVERFLOW > -1)
    if(fits_in_bits(v, MPREAL_DOUBLE_BITS_OVERFLOW))
    {
        mpfr_set_d(mpfr_ptr(),v,mpreal::get_default_rnd());
    }else
        throw conversion_overflow();
#else
    mpfr_set_d(mpfr_ptr(),v,mpreal::get_default_rnd());
#endif

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}
// Assignments from built-in integer types; each forwards to the matching
// mpfr_set_* routine at the target's current precision.
inline mpreal& mpreal::operator=(const unsigned long int v)
{
    mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator=(const unsigned int v)
{
    mpfr_set_ui(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator=(const unsigned long long int v)
{
    mpfr_set_uj(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator=(const long long int v)
{
    mpfr_set_sj(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator=(const long int v)
{
    mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator=(const int v)
{
    mpfr_set_si(mpfr_ptr(), v, mpreal::get_default_rnd());

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}
// Assignment from a C string, parsed in base 10 at the target's current
// precision. The parse goes through a temporary so that a malformed string
// leaves *this unchanged (mpfr_set_str returns non-zero on failure).
inline mpreal& mpreal::operator=(const char* s)
{
    // Use other converters for more precise control on base & precision & rounding:
    //
    //        mpreal(const char* s,        mp_prec_t prec, int base, mp_rnd_t mode)
    //        mpreal(const std::string& s, mp_prec_t prec, int base, mp_rnd_t mode)
    //
    // Here we assume base = 10 and we use precision of target variable.

    mpfr_t t;

    mpfr_init2(t, mpfr_get_prec(mpfr_srcptr()));

    if(0 == mpfr_set_str(t, s, 10, mpreal::get_default_rnd()))
    {
        mpfr_set(mpfr_ptr(), t, mpreal::get_default_rnd());
        MPREAL_MSVC_DEBUGVIEW_CODE;
    }

    // The temporary is released whether or not the parse succeeded.
    clear(t);
    return *this;
}
// Assignment from std::string, parsed in base 10 at the target's current
// precision; a malformed string leaves *this unchanged.
inline mpreal& mpreal::operator=(const std::string& s)
{
    // Delegate to the const char* overload: identical semantics (base 10,
    // target precision, default rounding, temporary-based rollback on parse
    // failure) and a single point of maintenance for that logic.
    return operator=(s.c_str());
}
// Assignment from std::complex: keeps only the real part.
// NOTE(review): the imaginary component is discarded silently — callers that
// assign a genuinely complex value lose information without warning.
template <typename real_t>
inline mpreal& mpreal::operator= (const std::complex<real_t>& z)
{
    return *this = z.real();
}
//////////////////////////////////////////////////////////////////////////
// + Addition
// Compound addition. Each overload accumulates into *this at its current
// precision using the most direct mpfr routine available for the operand
// type; types without a dedicated mpfr_add_* go through a temporary mpreal.
inline mpreal& mpreal::operator+=(const mpreal& v)
{
    mpfr_add(mpfr_ptr(), mpfr_srcptr(), v.mpfr_srcptr(), mpreal::get_default_rnd());
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+=(const mpf_t u)
{
    // No mpfr_add_f: convert through a temporary mpreal.
    *this += mpreal(u);
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+=(const mpz_t u)
{
    mpfr_add_z(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+=(const mpq_t u)
{
    mpfr_add_q(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+= (const long double u)
{
    // No mpfr_add_ld: convert through a temporary mpreal.
    *this += mpreal(u);
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+= (const double u)
{
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
    mpfr_add_d(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
#else
    // mpfr_add_d appeared in MPFR 2.4.0; fall back to a temporary.
    *this += mpreal(u);
#endif

    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+=(const unsigned long int u)
{
    mpfr_add_ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+=(const unsigned int u)
{
    mpfr_add_ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+=(const long int u)
{
    mpfr_add_si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}

inline mpreal& mpreal::operator+=(const int u)
{
    mpfr_add_si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
    MPREAL_MSVC_DEBUGVIEW_CODE;
    return *this;
}
inline mpreal& mpreal::operator+=(const long long int u) { *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline mpreal& mpreal::operator+=(const unsigned long long int u){ *this += mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline mpreal& mpreal::operator-=(const long long int u) { *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline mpreal& mpreal::operator-=(const unsigned long long int u){ *this -= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline mpreal& mpreal::operator*=(const long long int u) { *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline mpreal& mpreal::operator*=(const unsigned long long int u){ *this *= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline mpreal& mpreal::operator/=(const long long int u) { *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline mpreal& mpreal::operator/=(const unsigned long long int u){ *this /= mpreal(u); MPREAL_MSVC_DEBUGVIEW_CODE; return *this; }
inline const mpreal mpreal::operator+()const { return mpreal(*this); }
inline const mpreal operator+(const mpreal& a, const mpreal& b)
{
mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr())));
mpfr_add(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd());
return c;
}
inline mpreal& mpreal::operator++()
{
return *this += 1;
}
inline const mpreal mpreal::operator++ (int)
{
mpreal x(*this);
*this += 1;
return x;
}
inline mpreal& mpreal::operator--()
{
return *this -= 1;
}
inline const mpreal mpreal::operator-- (int)
{
mpreal x(*this);
*this -= 1;
return x;
}
//////////////////////////////////////////////////////////////////////////
// - Subtraction
// In-place subtraction: one overload per operand type, forwarding to the
// matching mpfr_sub_* routine with the default rounding mode.
inline mpreal& mpreal::operator-=(const mpreal& v)
{
mpfr_sub(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator-=(const mpz_t v)
{
mpfr_sub_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator-=(const mpq_t v)
{
mpfr_sub_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
// long double has no dedicated mpfr_sub variant; go through a temporary.
inline mpreal& mpreal::operator-=(const long double v)
{
*this -= mpreal(v);
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator-=(const double v)
{
// mpfr_sub_d is only available since MPFR 2.4.0.
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
mpfr_sub_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
#else
*this -= mpreal(v);
#endif
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator-=(const unsigned long int v)
{
mpfr_sub_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator-=(const unsigned int v)
{
mpfr_sub_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator-=(const long int v)
{
mpfr_sub_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator-=(const int v)
{
mpfr_sub_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
// Unary minus: negate a copy, leaving *this unchanged.
inline const mpreal mpreal::operator-()const
{
mpreal u(*this);
mpfr_neg(u.mpfr_ptr(),u.mpfr_srcptr(),mpreal::get_default_rnd());
return u;
}
// Binary minus: the result carries the larger of the two operand precisions.
inline const mpreal operator-(const mpreal& a, const mpreal& b)
{
mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr())));
mpfr_sub(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd());
return c;
}
// scalar - mpreal forms: use the reversed mpfr_*_sub routines so the scalar
// is the left-hand operand; results use the precision of 'a'.
inline const mpreal operator-(const double b, const mpreal& a)
{
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
mpfr_d_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
#else
// Pre-2.4.0 fallback: seed the result with b, then subtract in place.
mpreal x(b, mpfr_get_prec(a.mpfr_ptr()));
x -= a;
return x;
#endif
}
inline const mpreal operator-(const unsigned long int b, const mpreal& a)
{
mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
mpfr_ui_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
}
inline const mpreal operator-(const unsigned int b, const mpreal& a)
{
mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
mpfr_ui_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
}
inline const mpreal operator-(const long int b, const mpreal& a)
{
mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
mpfr_si_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
}
inline const mpreal operator-(const int b, const mpreal& a)
{
mpreal x(0, mpfr_get_prec(a.mpfr_ptr()));
mpfr_si_sub(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
}
//////////////////////////////////////////////////////////////////////////
// * Multiplication
// In-place multiplication: one overload per operand type, forwarding to the
// matching mpfr_mul_* routine with the default rounding mode.
inline mpreal& mpreal::operator*= (const mpreal& v)
{
mpfr_mul(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator*=(const mpz_t v)
{
mpfr_mul_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator*=(const mpq_t v)
{
mpfr_mul_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
// long double has no dedicated mpfr_mul variant; go through a temporary.
inline mpreal& mpreal::operator*=(const long double v)
{
*this *= mpreal(v);
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator*=(const double v)
{
// mpfr_mul_d is only available since MPFR 2.4.0.
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
mpfr_mul_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
#else
*this *= mpreal(v);
#endif
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator*=(const unsigned long int v)
{
mpfr_mul_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator*=(const unsigned int v)
{
mpfr_mul_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator*=(const long int v)
{
mpfr_mul_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator*=(const int v)
{
mpfr_mul_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
// Binary multiply: the result carries the larger of the two operand precisions.
inline const mpreal operator*(const mpreal& a, const mpreal& b)
{
mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_ptr()), mpfr_get_prec(b.mpfr_ptr())));
mpfr_mul(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd());
return c;
}
//////////////////////////////////////////////////////////////////////////
// / Division
// In-place division: one overload per operand type, forwarding to the
// matching mpfr_div_* routine with the default rounding mode.
inline mpreal& mpreal::operator/=(const mpreal& v)
{
mpfr_div(mpfr_ptr(),mpfr_srcptr(),v.mpfr_srcptr(),mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator/=(const mpz_t v)
{
mpfr_div_z(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator/=(const mpq_t v)
{
mpfr_div_q(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
// long double has no dedicated mpfr_div variant; go through a temporary.
inline mpreal& mpreal::operator/=(const long double v)
{
*this /= mpreal(v);
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator/=(const double v)
{
// mpfr_div_d is only available since MPFR 2.4.0.
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
mpfr_div_d(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
#else
*this /= mpreal(v);
#endif
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator/=(const unsigned long int v)
{
mpfr_div_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator/=(const unsigned int v)
{
mpfr_div_ui(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator/=(const long int v)
{
mpfr_div_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator/=(const int v)
{
mpfr_div_si(mpfr_ptr(),mpfr_srcptr(),v,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
// Binary divide: the result carries the larger of the two operand precisions.
inline const mpreal operator/(const mpreal& a, const mpreal& b)
{
mpreal c(0, (std::max)(mpfr_get_prec(a.mpfr_srcptr()), mpfr_get_prec(b.mpfr_srcptr())));
mpfr_div(c.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), mpreal::get_default_rnd());
return c;
}
// scalar / mpreal forms: use the reversed mpfr_*_div routines so the scalar
// is the dividend; results use the precision of 'a'.
inline const mpreal operator/(const unsigned long int b, const mpreal& a)
{
mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
mpfr_ui_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
}
inline const mpreal operator/(const unsigned int b, const mpreal& a)
{
mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
mpfr_ui_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
}
inline const mpreal operator/(const long int b, const mpreal& a)
{
mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
mpfr_si_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
}
inline const mpreal operator/(const int b, const mpreal& a)
{
mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
mpfr_si_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
}
// Divide a plain double by an mpreal; the result uses the precision of 'a'.
inline const mpreal operator/(const double b, const mpreal& a)
{
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
// mpfr_d_div (available since MPFR 2.4.0) divides a double directly.
mpreal x(0, mpfr_get_prec(a.mpfr_srcptr()));
mpfr_d_div(x.mpfr_ptr(), b, a.mpfr_srcptr(), mpreal::get_default_rnd());
return x;
#else
// Pre-2.4.0 fallback: seed the result with b (NOT 0 -- the previous code
// initialized x to 0 and therefore always returned 0), then divide in
// place, mirroring the operator-(double, mpreal) fallback above.
mpreal x(b, mpfr_get_prec(a.mpfr_ptr()));
x /= a;
return x;
#endif
}
//////////////////////////////////////////////////////////////////////////
// Shifts operators - Multiplication/Division by power of 2
// x <<= k multiplies by 2^k, x >>= k divides by 2^k (exact scaling of the
// exponent via mpfr_mul_2ui/si and mpfr_div_2ui/si).
inline mpreal& mpreal::operator<<=(const unsigned long int u)
{
mpfr_mul_2ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator<<=(const unsigned int u)
{
mpfr_mul_2ui(mpfr_ptr(),mpfr_srcptr(),static_cast<unsigned long int>(u),mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator<<=(const long int u)
{
mpfr_mul_2si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator<<=(const int u)
{
mpfr_mul_2si(mpfr_ptr(),mpfr_srcptr(),static_cast<long int>(u),mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator>>=(const unsigned long int u)
{
mpfr_div_2ui(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator>>=(const unsigned int u)
{
mpfr_div_2ui(mpfr_ptr(),mpfr_srcptr(),static_cast<unsigned long int>(u),mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator>>=(const long int u)
{
mpfr_div_2si(mpfr_ptr(),mpfr_srcptr(),u,mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::operator>>=(const int u)
{
mpfr_div_2si(mpfr_ptr(),mpfr_srcptr(),static_cast<long int>(u),mpreal::get_default_rnd());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
// Non-mutating shift operators, implemented via the mul_2*/div_2* helpers
// below with the default rounding mode.
inline const mpreal operator<<(const mpreal& v, const unsigned long int k)
{
return mul_2ui(v,k);
}
inline const mpreal operator<<(const mpreal& v, const unsigned int k)
{
return mul_2ui(v,static_cast<unsigned long int>(k));
}
inline const mpreal operator<<(const mpreal& v, const long int k)
{
return mul_2si(v,k);
}
inline const mpreal operator<<(const mpreal& v, const int k)
{
return mul_2si(v,static_cast<long int>(k));
}
inline const mpreal operator>>(const mpreal& v, const unsigned long int k)
{
return div_2ui(v,k);
}
inline const mpreal operator>>(const mpreal& v, const long int k)
{
return div_2si(v,k);
}
inline const mpreal operator>>(const mpreal& v, const unsigned int k)
{
return div_2ui(v,static_cast<unsigned long int>(k));
}
inline const mpreal operator>>(const mpreal& v, const int k)
{
return div_2si(v,static_cast<long int>(k));
}
// mul_2ui
// Returns v * 2^k; the copy-construction keeps v's precision for the result.
inline const mpreal mul_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode)
{
mpreal x(v);
mpfr_mul_2ui(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode);
return x;
}
// mul_2si
inline const mpreal mul_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode)
{
mpreal x(v);
mpfr_mul_2si(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode);
return x;
}
// Returns v / 2^k.
inline const mpreal div_2ui(const mpreal& v, unsigned long int k, mp_rnd_t rnd_mode)
{
mpreal x(v);
mpfr_div_2ui(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode);
return x;
}
inline const mpreal div_2si(const mpreal& v, long int k, mp_rnd_t rnd_mode)
{
mpreal x(v);
mpfr_div_2si(x.mpfr_ptr(),v.mpfr_srcptr(),k,rnd_mode);
return x;
}
//////////////////////////////////////////////////////////////////////////
//Relational operators
// WARNING:
//
// Please note that following checks for double-NaN are guaranteed to work only in IEEE math mode:
//
// isnan(b) = (b != b)
// isnan(b) = !(b == b) (we use in code below)
//
// Be cautions if you use compiler options which break strict IEEE compliance (e.g. -ffast-math in GCC).
// Use std::isnan instead (C++11).
// Scalar overloads reject a NaN mpreal explicitly before calling mpfr_cmp_*;
// for double/long double operands the (b == b) term filters NaN scalars as
// described in the warning above.
inline bool operator > (const mpreal& a, const mpreal& b ){ return (mpfr_greater_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
inline bool operator > (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) > 0 ); }
inline bool operator > (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) > 0 ); }
inline bool operator > (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) > 0 ); }
inline bool operator > (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) > 0 ); }
inline bool operator > (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) > 0 ); }
inline bool operator > (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) > 0 ); }
inline bool operator >= (const mpreal& a, const mpreal& b ){ return (mpfr_greaterequal_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
inline bool operator >= (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) >= 0 ); }
// inline bool operator >= (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (isnan()a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) >= 0 ); }
inline bool operator >= (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) >= 0 ); }
inline bool operator >= (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) >= 0 ); }
inline bool operator >= (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) >= 0 ); }
inline bool operator >= (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) >= 0 ); }
// operator< / <= / == families: same NaN handling as operator> above.
inline bool operator < (const mpreal& a, const mpreal& b ){ return (mpfr_less_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
inline bool operator < (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) < 0 ); }
inline bool operator < (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) < 0 ); }
inline bool operator < (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) < 0 ); }
inline bool operator < (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) < 0 ); }
inline bool operator < (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) < 0 ); }
inline bool operator < (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) < 0 ); }
inline bool operator <= (const mpreal& a, const mpreal& b ){ return (mpfr_lessequal_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
inline bool operator <= (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) <= 0 ); }
inline bool operator <= (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) <= 0 ); }
inline bool operator <= (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) <= 0 ); }
inline bool operator <= (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) <= 0 ); }
inline bool operator <= (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) <= 0 ); }
inline bool operator <= (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) <= 0 ); }
inline bool operator == (const mpreal& a, const mpreal& b ){ return (mpfr_equal_p(a.mpfr_srcptr(),b.mpfr_srcptr()) != 0 ); }
inline bool operator == (const mpreal& a, const unsigned long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); }
inline bool operator == (const mpreal& a, const unsigned int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_ui(a.mpfr_srcptr(),b) == 0 ); }
inline bool operator == (const mpreal& a, const long int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); }
inline bool operator == (const mpreal& a, const int b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (mpfr_cmp_si(a.mpfr_srcptr(),b) == 0 ); }
inline bool operator == (const mpreal& a, const long double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_ld(a.mpfr_srcptr(),b) == 0 ); }
inline bool operator == (const mpreal& a, const double b ){ return !isnan EIGEN_NOT_A_MACRO (a) && (b == b) && (mpfr_cmp_d (a.mpfr_srcptr(),b) == 0 ); }
// operator!= is the negation of the corresponding == overload (so it is
// true when either operand is NaN, matching IEEE semantics).
inline bool operator != (const mpreal& a, const mpreal& b ){ return !(a == b); }
inline bool operator != (const mpreal& a, const unsigned long int b ){ return !(a == b); }
inline bool operator != (const mpreal& a, const unsigned int b ){ return !(a == b); }
inline bool operator != (const mpreal& a, const long int b ){ return !(a == b); }
inline bool operator != (const mpreal& a, const int b ){ return !(a == b); }
inline bool operator != (const mpreal& a, const long double b ){ return !(a == b); }
inline bool operator != (const mpreal& a, const double b ){ return !(a == b); }
// Classification predicates; parenthesized names avoid clashing with the
// C99 isnan/isinf/isfinite macros.
inline bool (isnan) (const mpreal& op){ return (mpfr_nan_p (op.mpfr_srcptr()) != 0 ); }
inline bool (isinf) (const mpreal& op){ return (mpfr_inf_p (op.mpfr_srcptr()) != 0 ); }
inline bool (isfinite) (const mpreal& op){ return (mpfr_number_p (op.mpfr_srcptr()) != 0 ); }
inline bool iszero (const mpreal& op){ return (mpfr_zero_p (op.mpfr_srcptr()) != 0 ); }
inline bool isint (const mpreal& op){ return (mpfr_integer_p(op.mpfr_srcptr()) != 0 ); }
// mpfr_regular_p requires MPFR >= 3.0.0.
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
inline bool isregular(const mpreal& op){ return (mpfr_regular_p(op.mpfr_srcptr()));}
#endif
//////////////////////////////////////////////////////////////////////////
// Type Converters
// Conversions to native types; each forwards to the matching mpfr_get_*
// routine with the caller-supplied rounding mode (toBool is exact).
inline bool mpreal::toBool ( ) const { return mpfr_zero_p (mpfr_srcptr()) == 0; }
inline long mpreal::toLong (mp_rnd_t mode) const { return mpfr_get_si (mpfr_srcptr(), mode); }
inline unsigned long mpreal::toULong (mp_rnd_t mode) const { return mpfr_get_ui (mpfr_srcptr(), mode); }
inline float mpreal::toFloat (mp_rnd_t mode) const { return mpfr_get_flt(mpfr_srcptr(), mode); }
inline double mpreal::toDouble (mp_rnd_t mode) const { return mpfr_get_d (mpfr_srcptr(), mode); }
inline long double mpreal::toLDouble(mp_rnd_t mode) const { return mpfr_get_ld (mpfr_srcptr(), mode); }
inline long long mpreal::toLLong (mp_rnd_t mode) const { return mpfr_get_sj (mpfr_srcptr(), mode); }
inline unsigned long long mpreal::toULLong (mp_rnd_t mode) const { return mpfr_get_uj (mpfr_srcptr(), mode); }
// Raw access to the underlying mpfr_t (mutable and read-only views).
inline ::mpfr_ptr mpreal::mpfr_ptr() { return mp; }
inline ::mpfr_srcptr mpreal::mpfr_ptr() const { return mp; }
inline ::mpfr_srcptr mpreal::mpfr_srcptr() const { return mp; }
// Render 't' as a string after applying the stream manipulator 'f'
// (e.g. std::dec, std::hex) to an output string stream.
template <class T>
inline std::string toString(T t, std::ios_base & (*f)(std::ios_base&))
{
    std::ostringstream buffer;
    buffer << f;   // install the formatting manipulator first
    buffer << t;
    return buffer.str();
}
// printf-style conversion using mpfr_asprintf; requires MPFR >= 2.4.0.
// Returns an empty string if 'format' is empty or formatting fails.
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
inline std::string mpreal::toString(const std::string& format) const
{
char *s = NULL;
std::string out;
if( !format.empty() )
{
// mpfr_asprintf allocates 's'; it must be released with mpfr_free_str.
if(!(mpfr_asprintf(&s, format.c_str(), mpfr_srcptr()) < 0))
{
out = std::string(s);
mpfr_free_str(s);
}
}
return out;
}
#endif
// Convert to a decimal string with 'n' significant digits (n < 0 means use
// the full precision of the number). On MPFR >= 2.4.0 this delegates to the
// printf-style toString above; otherwise a legacy manual formatter trims
// trailing zeros and inserts the decimal point / exponent by hand.
inline std::string mpreal::toString(int n, int b, mp_rnd_t mode) const
{
// TODO: Add extended format specification (f, e, rounding mode) as it done in output operator
(void)b;
(void)mode;
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
std::ostringstream format;
int digits = (n >= 0) ? n : 1 + bits2digits(mpfr_get_prec(mpfr_srcptr()));
format << "%." << digits << "RNg";
return toString(format.str());
#else
char *s, *ns = NULL;
size_t slen, nslen;
mp_exp_t exp;
std::string out;
// Special values first.
if(mpfr_inf_p(mp))
{
if(mpfr_sgn(mp)>0) return "+Inf";
else return "-Inf";
}
if(mpfr_zero_p(mp)) return "0";
if(mpfr_nan_p(mp)) return "NaN";
// Get both the full-precision digits and the n-digit rounding; keep the
// shorter representation.
s = mpfr_get_str(NULL, &exp, b, 0, mp, mode);
ns = mpfr_get_str(NULL, &exp, b, (std::max)(0,n), mp, mode);
if(s!=NULL && ns!=NULL)
{
slen = strlen(s);
nslen = strlen(ns);
if(nslen<=slen)
{
mpfr_free_str(s);
s = ns;
slen = nslen;
}
else {
mpfr_free_str(ns);
}
// Make human eye-friendly formatting if possible
if (exp>0 && static_cast<size_t>(exp)<slen)
{
// Decimal point falls inside the digit string: no exponent needed.
if(s[0]=='-')
{
// Remove zeros starting from right end
char* ptr = s+slen-1;
while (*ptr=='0' && ptr>s+exp) ptr--;
if(ptr==s+exp) out = std::string(s,exp+1);
else out = std::string(s,exp+1)+'.'+std::string(s+exp+1,ptr-(s+exp+1)+1);
//out = string(s,exp+1)+'.'+string(s+exp+1);
}
else
{
// Remove zeros starting from right end
char* ptr = s+slen-1;
while (*ptr=='0' && ptr>s+exp-1) ptr--;
if(ptr==s+exp-1) out = std::string(s,exp);
else out = std::string(s,exp)+'.'+std::string(s+exp,ptr-(s+exp)+1);
//out = string(s,exp)+'.'+string(s+exp);
}
}else{ // exp<0 || exp>slen
// Scientific notation: one digit before the point, exponent appended.
if(s[0]=='-')
{
// Remove zeros starting from right end
char* ptr = s+slen-1;
while (*ptr=='0' && ptr>s+1) ptr--;
if(ptr==s+1) out = std::string(s,2);
else out = std::string(s,2)+'.'+std::string(s+2,ptr-(s+2)+1);
//out = string(s,2)+'.'+string(s+2);
}
else
{
// Remove zeros starting from right end
char* ptr = s+slen-1;
while (*ptr=='0' && ptr>s) ptr--;
if(ptr==s) out = std::string(s,1);
else out = std::string(s,1)+'.'+std::string(s+1,ptr-(s+1)+1);
//out = string(s,1)+'.'+string(s+1);
}
// Make final string
// mpfr exponents count digits before the point, so e = exp-1; omit "e0".
if(--exp)
{
if(exp>0) out += "e+"+mpfr::toString<mp_exp_t>(exp,std::dec);
else out += "e"+mpfr::toString<mp_exp_t>(exp,std::dec);
}
}
mpfr_free_str(s);
return out;
}else{
return "conversion error!";
}
#endif
}
//////////////////////////////////////////////////////////////////////////
// I/O
// Stream output: build an mpfr_asprintf format string honoring the stream's
// showpos/precision/fixed/scientific flags, then print via mpfr_asprintf.
inline std::ostream& mpreal::output(std::ostream& os) const
{
std::ostringstream format;
const std::ios::fmtflags flags = os.flags();
format << ((flags & std::ios::showpos) ? "%+" : "%");
if (os.precision() >= 0)
format << '.' << os.precision() << "R*"
<< ((flags & std::ios::floatfield) == std::ios::fixed ? 'f' :
(flags & std::ios::floatfield) == std::ios::scientific ? 'e' :
'g');
else
format << "R*e";
char *s = NULL;
// "R*" consumes the rounding-mode argument passed before the mpfr value.
if(!(mpfr_asprintf(&s, format.str().c_str(),
mpfr::mpreal::get_default_rnd(),
mpfr_srcptr())
< 0))
{
os << std::string(s);
mpfr_free_str(s);
}
return os;
}
inline std::ostream& operator<<(std::ostream& os, const mpreal& v)
{
return v.output(os);
}
// Stream input: reads one whitespace-delimited token and parses it as a
// base-10 number. NOTE(review): mpfr_set_str's return value is ignored, so
// a malformed token is silently dropped without setting the stream fail bit.
inline std::istream& operator>>(std::istream &is, mpreal& v)
{
// TODO: use cout::hexfloat and other flags to setup base
std::string tmp;
is >> tmp;
mpfr_set_str(v.mpfr_ptr(), tmp.c_str(), 10, mpreal::get_default_rnd());
return is;
}
//////////////////////////////////////////////////////////////////////////
// Bits - decimal digits relation
// bits = ceil(digits*log[2](10))
// digits = floor(bits*log[10](2))

// Smallest number of binary bits able to hold 'd' decimal digits.
inline mp_prec_t digits2bits(int d)
{
    const double log2_of_ten = 3.3219280948873624;
    const double bits = std::ceil(d * log2_of_ten);
    return mp_prec_t(bits);
}

// Number of decimal digits exactly representable in 'b' binary bits.
inline int bits2digits(mp_prec_t b)
{
    const double log10_of_two = 0.30102999566398119;
    const double digits = std::floor(b * log10_of_two);
    return int(digits);
}
//////////////////////////////////////////////////////////////////////////
// Set/Get number properties
// Sign of the value: negative, zero, or positive (mpfr_sgn convention).
inline int sgn(const mpreal& op)
{
return mpfr_sgn(op.mpfr_srcptr());
}
// Force the sign bit: negative when sign < 0, positive otherwise.
inline mpreal& mpreal::setSign(int sign, mp_rnd_t RoundingMode)
{
mpfr_setsign(mpfr_ptr(), mpfr_srcptr(), (sign < 0 ? 1 : 0), RoundingMode);
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline int mpreal::getPrecision() const
{
return int(mpfr_get_prec(mpfr_srcptr()));
}
// Change precision in place, rounding the stored value to the new width.
inline mpreal& mpreal::setPrecision(int Precision, mp_rnd_t RoundingMode)
{
mpfr_prec_round(mpfr_ptr(), Precision, RoundingMode);
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::setInf(int sign)
{
mpfr_set_inf(mpfr_ptr(), sign);
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::setNan()
{
mpfr_set_nan(mpfr_ptr());
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mpreal& mpreal::setZero(int sign)
{
// mpfr_set_zero (signed zero in one call) requires MPFR >= 3.0.0.
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
mpfr_set_zero(mpfr_ptr(), sign);
#else
mpfr_set_si(mpfr_ptr(), 0, (mpfr_get_default_rounding_mode)());
setSign(sign);
#endif
MPREAL_MSVC_DEBUGVIEW_CODE;
return *this;
}
inline mp_prec_t mpreal::get_prec() const
{
return mpfr_get_prec(mpfr_srcptr());
}
inline void mpreal::set_prec(mp_prec_t prec, mp_rnd_t rnd_mode)
{
mpfr_prec_round(mpfr_ptr(),prec,rnd_mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
}
inline mp_exp_t mpreal::get_exp ()
{
return mpfr_get_exp(mpfr_srcptr());
}
inline int mpreal::set_exp (mp_exp_t e)
{
int x = mpfr_set_exp(mpfr_ptr(), e);
MPREAL_MSVC_DEBUGVIEW_CODE;
return x;
}
// Split x into fraction and exponent. mpfr_frexp requires MPFR >= 3.1.0;
// the fallback extracts the exponent and rezeros it manually.
inline const mpreal frexp(const mpreal& x, mp_exp_t* exp, mp_rnd_t mode = mpreal::get_default_rnd())
{
mpreal y(x);
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
mpfr_frexp(exp,y.mpfr_ptr(),x.mpfr_srcptr(),mode);
#else
*exp = mpfr_get_exp(y.mpfr_srcptr());
mpfr_set_exp(y.mpfr_ptr(),0);
#endif
return y;
}
// v * 2^exp.
inline const mpreal ldexp(const mpreal& v, mp_exp_t exp)
{
mpreal x(v);
// rounding is not important since we are just increasing the exponent (= exact operation)
mpfr_mul_2si(x.mpfr_ptr(), x.mpfr_srcptr(), exp, mpreal::get_default_rnd());
return x;
}
// C99-style alias for ldexp.
inline const mpreal scalbn(const mpreal& v, mp_exp_t exp)
{
return ldexp(v, exp);
}
inline mpreal machine_epsilon(mp_prec_t prec)
{
/* the smallest eps such that 1 + eps != 1 */
return machine_epsilon(mpreal(1, prec));
}
inline mpreal machine_epsilon(const mpreal& x)
{
/* the smallest eps such that x + eps != x */
// Computed as the gap to the next representable value above |x|.
if( x < 0)
{
return nextabove(-x) + x;
}else{
return nextabove( x) - x;
}
}
// minval is 'safe' meaning 1 / minval does not overflow
inline mpreal minval(mp_prec_t prec)
{
/* min = 1/2 * 2^emin = 2^(emin - 1) */
return mpreal(1, prec) << mpreal::get_emin()-1;
}
// maxval is 'safe' meaning 1 / maxval does not underflow
inline mpreal maxval(mp_prec_t prec)
{
/* max = (1 - eps) * 2^emax, eps is machine epsilon */
return (mpreal(1, prec) - machine_epsilon(prec)) << mpreal::get_emax();
}
// True when a and b differ by at most 'maxUlps' units in the last place of
// the larger magnitude.
inline bool isEqualUlps(const mpreal& a, const mpreal& b, int maxUlps)
{
return abs(a - b) <= machine_epsilon((max)(abs(a), abs(b))) * maxUlps;
}
// Absolute-tolerance comparison with an explicit epsilon.
inline bool isEqualFuzzy(const mpreal& a, const mpreal& b, const mpreal& eps)
{
return abs(a - b) <= eps;
}
// Default tolerance: epsilon at the smaller magnitude, clamped below by 1.
inline bool isEqualFuzzy(const mpreal& a, const mpreal& b)
{
return isEqualFuzzy(a, b, machine_epsilon((max)(1, (min)(abs(a), abs(b)))));
}
//////////////////////////////////////////////////////////////////////////
// C++11 sign functions.
// |x| with the sign of y, at the precision of x.
inline mpreal copysign(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal rop(0, mpfr_get_prec(x.mpfr_ptr()));
mpfr_setsign(rop.mpfr_ptr(), x.mpfr_srcptr(), mpfr_signbit(y.mpfr_srcptr()), rnd_mode);
return rop;
}
inline bool signbit(const mpreal& x)
{
return mpfr_signbit(x.mpfr_srcptr());
}
// Splits v into fractional part (returned) and integral part (stored in n).
inline const mpreal modf(const mpreal& v, mpreal& n)
{
mpreal f(v);
// rounding is not important since we are using the same number
mpfr_frac (f.mpfr_ptr(),f.mpfr_srcptr(),mpreal::get_default_rnd());
mpfr_trunc(n.mpfr_ptr(),v.mpfr_srcptr());
return f;
}
// Thin wrappers over the corresponding mpfr exponent-range helpers; 't' is
// the ternary value returned by a previous mpfr operation.
inline int mpreal::check_range (int t, mp_rnd_t rnd_mode)
{
return mpfr_check_range(mpfr_ptr(),t,rnd_mode);
}
inline int mpreal::subnormalize (int t,mp_rnd_t rnd_mode)
{
int r = mpfr_subnormalize(mpfr_ptr(),t,rnd_mode);
MPREAL_MSVC_DEBUGVIEW_CODE;
return r;
}
// Static accessors for the global MPFR exponent range (shared by all
// mpreal instances, not per-object state).
inline mp_exp_t mpreal::get_emin (void)
{
return mpfr_get_emin();
}
inline int mpreal::set_emin (mp_exp_t exp)
{
return mpfr_set_emin(exp);
}
inline mp_exp_t mpreal::get_emax (void)
{
return mpfr_get_emax();
}
inline int mpreal::set_emax (mp_exp_t exp)
{
return mpfr_set_emax(exp);
}
// Implementation-defined bounds on the values accepted by set_emin/set_emax.
inline mp_exp_t mpreal::get_emin_min (void)
{
return mpfr_get_emin_min();
}
inline mp_exp_t mpreal::get_emin_max (void)
{
return mpfr_get_emin_max();
}
inline mp_exp_t mpreal::get_emax_min (void)
{
return mpfr_get_emax_min();
}
inline mp_exp_t mpreal::get_emax_max (void)
{
return mpfr_get_emax_max();
}
//////////////////////////////////////////////////////////////////////////
// Mathematical Functions
//////////////////////////////////////////////////////////////////////////
// Common body for one-argument wrappers below: allocate the result at the
// operand's precision, invoke the matching mpfr_<f> routine, return it.
// Relies on `x` (input) and `r` (rounding mode) being in scope at expansion.
#define MPREAL_UNARY_MATH_FUNCTION_BODY(f) \
mpreal y(0, mpfr_get_prec(x.mpfr_srcptr())); \
mpfr_##f(y.mpfr_ptr(), x.mpfr_srcptr(), r); \
return y;
// sqr(x) = x*x; sqrt(x) at x's precision.
inline const mpreal sqr (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
{ MPREAL_UNARY_MATH_FUNCTION_BODY(sqr ); }
inline const mpreal sqrt (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
{ MPREAL_UNARY_MATH_FUNCTION_BODY(sqrt); }
// Integer overloads: uses the dedicated mpfr_sqrt_ui fast path when the
// argument is non-negative; negative signed arguments yield NaN (real sqrt).
inline const mpreal sqrt(const unsigned long int x, mp_rnd_t r)
{
mpreal y;
mpfr_sqrt_ui(y.mpfr_ptr(), x, r);
return y;
}
inline const mpreal sqrt(const unsigned int v, mp_rnd_t rnd_mode)
{
return sqrt(static_cast<unsigned long int>(v),rnd_mode);
}
inline const mpreal sqrt(const long int v, mp_rnd_t rnd_mode)
{
if (v>=0) return sqrt(static_cast<unsigned long int>(v),rnd_mode);
else return mpreal().setNan(); // NaN
}
inline const mpreal sqrt(const int v, mp_rnd_t rnd_mode)
{
if (v>=0) return sqrt(static_cast<unsigned long int>(v),rnd_mode);
else return mpreal().setNan(); // NaN
}
// k-th root of x at x's precision.
// NOTE(review): mpfr_root is deprecated as of MPFR 4.0 in favor of
// mpfr_rootn_ui (which differs for x = -0) — confirm the targeted MPFR
// versions before migrating.
inline const mpreal root(const mpreal& x, unsigned long int k, mp_rnd_t r = mpreal::get_default_rnd())
{
mpreal y(0, mpfr_get_prec(x.mpfr_srcptr()));
mpfr_root(y.mpfr_ptr(), x.mpfr_srcptr(), k, r);
return y;
}
// Positive difference: max(a - b, 0), computed at a's precision.
inline const mpreal dim(const mpreal& a, const mpreal& b, mp_rnd_t r = mpreal::get_default_rnd())
{
mpreal y(0, mpfr_get_prec(a.mpfr_srcptr()));
mpfr_dim(y.mpfr_ptr(), a.mpfr_srcptr(), b.mpfr_srcptr(), r);
return y;
}
// Compares |a| and |b|; returns <0, 0, or >0 like strcmp.
inline int cmpabs(const mpreal& a,const mpreal& b)
{
return mpfr_cmpabs(a.mpfr_ptr(), b.mpfr_srcptr());
}
// Computes sine and cosine of v simultaneously (cheaper than two calls);
// results are stored into s and c.
inline int sin_cos(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
return mpfr_sin_cos(s.mpfr_ptr(), c.mpfr_ptr(), v.mpfr_srcptr(), rnd_mode);
}
// sqrt overloads for native floating-point arguments (convert, then dispatch).
inline const mpreal sqrt (const long double v, mp_rnd_t rnd_mode) { return sqrt(mpreal(v),rnd_mode); }
inline const mpreal sqrt (const double v, mp_rnd_t rnd_mode) { return sqrt(mpreal(v),rnd_mode); }
// One-argument elementary functions; each result carries the operand's
// precision (see MPREAL_UNARY_MATH_FUNCTION_BODY above).
inline const mpreal cbrt (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cbrt ); }
inline const mpreal fabs (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(abs ); }
inline const mpreal abs (const mpreal& x, mp_rnd_t r) { MPREAL_UNARY_MATH_FUNCTION_BODY(abs ); }
inline const mpreal log (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log ); }
inline const mpreal log2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log2 ); }
inline const mpreal log10 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log10); }
inline const mpreal exp (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp ); }
inline const mpreal exp2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp2 ); }
inline const mpreal exp10 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(exp10); }
// Trigonometric functions and their reciprocals.
inline const mpreal cos (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cos ); }
inline const mpreal sin (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sin ); }
inline const mpreal tan (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(tan ); }
inline const mpreal sec (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sec ); }
inline const mpreal csc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(csc ); }
inline const mpreal cot (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cot ); }
inline const mpreal acos (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(acos ); }
inline const mpreal asin (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(asin ); }
inline const mpreal atan (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(atan ); }
// NOTE(review): this logb returns log2(|x|) exactly, unlike C's logb which
// returns the (integral) exponent — confirm this divergence is intended.
inline const mpreal logb (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { return log2 (abs(x),r); }
// Inverse reciprocal functions, expressed via the reciprocal argument.
inline const mpreal acot (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return atan (1/v, r); }
inline const mpreal asec (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return acos (1/v, r); }
inline const mpreal acsc (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return asin (1/v, r); }
inline const mpreal acoth (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return atanh(1/v, r); }
inline const mpreal asech (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return acosh(1/v, r); }
inline const mpreal acsch (const mpreal& v, mp_rnd_t r = mpreal::get_default_rnd()) { return asinh(1/v, r); }
// Hyperbolic functions and inverses.
inline const mpreal cosh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(cosh ); }
inline const mpreal sinh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sinh ); }
inline const mpreal tanh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(tanh ); }
inline const mpreal sech (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(sech ); }
inline const mpreal csch (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(csch ); }
inline const mpreal coth (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(coth ); }
inline const mpreal acosh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(acosh); }
inline const mpreal asinh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(asinh); }
inline const mpreal atanh (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(atanh); }
// Special functions. Note: gamma and tgamma both map to mpfr_gamma and are
// therefore identical; lngamma is log|Gamma(x)|.
inline const mpreal log1p (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(log1p ); }
inline const mpreal expm1 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(expm1 ); }
inline const mpreal eint (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(eint ); }
inline const mpreal gamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(gamma ); }
inline const mpreal tgamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(gamma ); }
inline const mpreal lngamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(lngamma); }
inline const mpreal zeta (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(zeta ); }
inline const mpreal erf (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(erf ); }
inline const mpreal erfc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(erfc ); }
// Bessel functions of the first (j) and second (y) kind, orders 0 and 1.
inline const mpreal besselj0(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(j0 ); }
inline const mpreal besselj1(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(j1 ); }
inline const mpreal bessely0(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(y0 ); }
inline const mpreal bessely1(const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(y1 ); }
// Two-argument functions: the result precision is the larger of the two
// operand precisions.
inline const mpreal atan2 (const mpreal& y, const mpreal& x, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
mpfr_atan2(a.mpfr_ptr(), y.mpfr_srcptr(), x.mpfr_srcptr(), rnd_mode);
return a;
}
// sqrt(x^2 + y^2) without intermediate overflow/underflow.
inline const mpreal hypot (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
mpfr_hypot(a.mpfr_ptr(), x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode);
return a;
}
// IEEE remainder: x - n*y with n = round-to-nearest(x/y).
inline const mpreal remainder (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
mpfr_remainder(a.mpfr_ptr(), x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode);
return a;
}
// Like remainder, but also stores the low bits of the quotient into *q.
inline const mpreal remquo (long* q, const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a(0,(std::max)(y.getPrecision(), x.getPrecision()));
mpfr_remquo(a.mpfr_ptr(),q, x.mpfr_srcptr(), y.mpfr_srcptr(), rnd_mode);
return a;
}
// Factorial of v, computed at the requested precision.
inline const mpreal fac_ui (unsigned long int v, mp_prec_t prec = mpreal::get_default_prec(),
mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x(0, prec);
mpfr_fac_ui(x.mpfr_ptr(),v,rnd_mode);
return x;
}
// log|Gamma(v)|. When signp is non-null, the sign of Gamma(v) is written
// through it; otherwise the sign is computed into a local and discarded.
// The result inherits v's precision (x is copy-constructed from v).
inline const mpreal lgamma (const mpreal& v, int *signp = 0, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x(v);
int tsignp;
if(signp) mpfr_lgamma(x.mpfr_ptr(), signp,v.mpfr_srcptr(),rnd_mode);
else mpfr_lgamma(x.mpfr_ptr(),&tsignp,v.mpfr_srcptr(),rnd_mode);
return x;
}
// Bessel function of the first kind of integer order n, evaluated at x;
// the result carries x's precision.
inline const mpreal besseljn (long n, const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
{
mpreal result(0, x.getPrecision());
mpfr_jn(result.mpfr_ptr(), n, x.mpfr_srcptr(), r);
return result;
}
// Bessel function of the second kind of integer order n, evaluated at x;
// the result carries x's precision.
inline const mpreal besselyn (long n, const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
{
mpreal result(0, x.getPrecision());
mpfr_yn(result.mpfr_ptr(), n, x.mpfr_srcptr(), r);
return result;
}
// Fused multiply-add: computes v1*v2 + v3 with a single rounding step.
// The result carries the largest of the three operand precisions.
inline const mpreal fma (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a;
// max-of-three via std::max (parenthesized to dodge any max() macro),
// replacing the previous hard-to-read nested ternary.
a.set_prec((std::max)(v1.get_prec(), (std::max)(v2.get_prec(), v3.get_prec())));
mpfr_fma(a.mp,v1.mp,v2.mp,v3.mp,rnd_mode);
return a;
}
// Fused multiply-subtract: computes v1*v2 - v3 with a single rounding step.
// The result carries the largest of the three operand precisions.
inline const mpreal fms (const mpreal& v1, const mpreal& v2, const mpreal& v3, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a;
// max-of-three via std::max (parenthesized to dodge any max() macro),
// replacing the previous hard-to-read nested ternary.
a.set_prec((std::max)(v1.get_prec(), (std::max)(v2.get_prec(), v3.get_prec())));
mpfr_fms(a.mp,v1.mp,v2.mp,v3.mp,rnd_mode);
return a;
}
// Arithmetic-geometric mean of v1 and v2.
// The result carries the larger of the two operand precisions.
inline const mpreal agm (const mpreal& v1, const mpreal& v2, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a;
// std::max instead of a ternary, matching the precision handling used by
// atan2/hypot above; parenthesized to dodge any max() macro.
a.set_prec((std::max)(v1.get_prec(), v2.get_prec()));
mpfr_agm(a.mp, v1.mp, v2.mp, rnd_mode);
return a;
}
// Correctly-rounded sum of tab[0..n-1]; the ternary flag of the final
// rounding is written to `status`.
// NOTE(review): the raw new[]/delete[] pair leaks `p` if mpfr_sum throws —
// confirm whether exceptions are possible here; a std::vector would make
// this exception-safe.
inline const mpreal sum (const mpreal tab[], const unsigned long int n, int& status, mp_rnd_t mode = mpreal::get_default_rnd())
{
mpfr_srcptr *p = new mpfr_srcptr[n];
for (unsigned long int i = 0; i < n; i++)
p[i] = tab[i].mpfr_srcptr();
mpreal x;
status = mpfr_sum(x.mpfr_ptr(), (mpfr_ptr*)p, n, mode);
delete [] p;
return x;
}
//////////////////////////////////////////////////////////////////////////
// MPFR 2.4.0 Specifics
#if (MPFR_VERSION >= MPFR_VERSION_NUM(2,4,0))
// Computes sinh and cosh of v simultaneously into s and c.
inline int sinh_cosh(mpreal& s, mpreal& c, const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
return mpfr_sinh_cosh(s.mp,c.mp,v.mp,rnd_mode);
}
// Dilogarithm Li2(x).
inline const mpreal li2 (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd())
{
MPREAL_UNARY_MATH_FUNCTION_BODY(li2);
}
// NOTE(review): rem() calls fmod(), which is defined further below in this
// header — confirm a forward declaration exists earlier in the file.
inline const mpreal rem (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
/* R = rem(X,Y) if Y != 0, returns X - n * Y where n = trunc(X/Y). */
return fmod(x, y, rnd_mode);
}
inline const mpreal mod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
(void)rnd_mode;
/*
m = mod(x,y) if y != 0, returns x - n*y where n = floor(x/y)
The following are true by convention:
- mod(x,0) is x
- mod(x,x) is 0
- mod(x,y) for x != y and y != 0 has the same sign as y.
*/
if(iszero(y)) return x;
if(x == y) return 0;
mpreal m = x - floor(x / y) * y;
m.setSign(sgn(y)); // make sure result has the same sign as Y
return m;
}
// Truncated-division remainder; result precision is the larger operand's.
inline const mpreal fmod (const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a;
mp_prec_t yp, xp;
yp = y.get_prec();
xp = x.get_prec();
a.set_prec(yp>xp?yp:xp);
mpfr_fmod(a.mp, x.mp, y.mp, rnd_mode);
return a;
}
// 1/sqrt(v), at v's precision (x copy-constructed from v).
inline const mpreal rec_sqrt(const mpreal& v, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x(v);
mpfr_rec_sqrt(x.mp,v.mp,rnd_mode);
return x;
}
#endif // MPFR 2.4.0 Specifics
//////////////////////////////////////////////////////////////////////////
// MPFR 3.0.0 Specifics
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
// digamma(x) = d/dx log(Gamma(x)); ai(x) is the Airy function Ai.
inline const mpreal digamma (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(digamma); }
inline const mpreal ai (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(ai); }
#endif // MPFR 3.0.0 Specifics
//////////////////////////////////////////////////////////////////////////
// Constants
// Each constant is computed fresh at the requested precision `p`.
inline const mpreal const_log2 (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
{
mpreal x(0, p);
mpfr_const_log2(x.mpfr_ptr(), r);
return x;
}
inline const mpreal const_pi (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
{
mpreal x(0, p);
mpfr_const_pi(x.mpfr_ptr(), r);
return x;
}
// Euler-Mascheroni constant (gamma ~ 0.5772...).
inline const mpreal const_euler (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
{
mpreal x(0, p);
mpfr_const_euler(x.mpfr_ptr(), r);
return x;
}
inline const mpreal const_catalan (mp_prec_t p = mpreal::get_default_prec(), mp_rnd_t r = mpreal::get_default_rnd())
{
mpreal x(0, p);
mpfr_const_catalan(x.mpfr_ptr(), r);
return x;
}
// +infinity when sign >= 0, -infinity when sign < 0.
inline const mpreal const_infinity (int sign = 1, mp_prec_t p = mpreal::get_default_prec())
{
mpreal x(0, p);
mpfr_set_inf(x.mpfr_ptr(), sign);
return x;
}
//////////////////////////////////////////////////////////////////////////
// Integer Related Functions
// Rounding to integral values; each result inherits the input's precision
// via copy-construction.
inline const mpreal ceil(const mpreal& v)
{
mpreal x(v);
mpfr_ceil(x.mp,v.mp);
return x;
}
inline const mpreal floor(const mpreal& v)
{
mpreal x(v);
mpfr_floor(x.mp,v.mp);
return x;
}
// Rounds to nearest, ties away from zero (mpfr_round semantics).
inline const mpreal round(const mpreal& v)
{
mpreal x(v);
mpfr_round(x.mp,v.mp);
return x;
}
inline const mpreal trunc(const mpreal& v)
{
mpreal x(v);
mpfr_trunc(x.mp,v.mp);
return x;
}
// rint* variants round to an integral value using the given rounding mode;
// frac keeps only the fractional part.
inline const mpreal rint (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint ); }
inline const mpreal rint_ceil (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_ceil ); }
inline const mpreal rint_floor (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_floor); }
inline const mpreal rint_round (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_round); }
inline const mpreal rint_trunc (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(rint_trunc); }
inline const mpreal frac (const mpreal& x, mp_rnd_t r = mpreal::get_default_rnd()) { MPREAL_UNARY_MATH_FUNCTION_BODY(frac ); }
//////////////////////////////////////////////////////////////////////////
// Miscellaneous Functions
// O(1) swap of the underlying mpfr_t handles (no copying of digits).
inline void swap (mpreal& a, mpreal& b) { mpfr_swap(a.mp,b.mp); }
// Parenthesized names keep these safe when a max/min macro is defined.
inline const mpreal (max)(const mpreal& x, const mpreal& y){ return (x>y?x:y); }
inline const mpreal (min)(const mpreal& x, const mpreal& y){ return (x<y?x:y); }
// fmax/fmin via MPFR: unlike (max)/(min) above, these follow mpfr_max/min
// semantics (e.g. NaN handling) and take a rounding mode.
inline const mpreal fmax(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a;
mpfr_max(a.mp,x.mp,y.mp,rnd_mode);
return a;
}
inline const mpreal fmin(const mpreal& x, const mpreal& y, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal a;
mpfr_min(a.mp,x.mp,y.mp,rnd_mode);
return a;
}
// Next representable value from x in the direction of y / upward / downward,
// at x's precision (result copy-constructed from x).
inline const mpreal nexttoward (const mpreal& x, const mpreal& y)
{
mpreal a(x);
mpfr_nexttoward(a.mp,y.mp);
return a;
}
inline const mpreal nextabove (const mpreal& x)
{
mpreal a(x);
mpfr_nextabove(a.mp);
return a;
}
inline const mpreal nextbelow (const mpreal& x)
{
mpreal a(x);
mpfr_nextbelow(a.mp);
return a;
}
// Uniform random number in [0,1) using the caller-supplied GMP state.
inline const mpreal urandomb (gmp_randstate_t& state)
{
mpreal x;
mpfr_urandomb(x.mpfr_ptr(),state);
return x;
}
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
// Uniform random number in [0,1] rounded per rnd_mode (MPFR >= 3.0).
inline const mpreal urandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x;
mpfr_urandom(x.mpfr_ptr(), state, rnd_mode);
return x;
}
#endif
#if (MPFR_VERSION <= MPFR_VERSION_NUM(2,4,2))
// Legacy non-uniform random generator (removed after MPFR 2.4.2).
inline const mpreal random2 (mp_size_t size, mp_exp_t exp)
{
mpreal x;
mpfr_random2(x.mpfr_ptr(),size,exp);
return x;
}
#endif
// Uniformly distributed random number generation
// a = random(seed); <- initialization & first random number generation
// a = random(); <- next random numbers generation
// seed != 0
// NOTE(review): the generator state below is function-local static and is
// mutated without synchronization — presumably not thread-safe; confirm
// before calling from multiple threads.
inline const mpreal random(unsigned int seed = 0)
{
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
static gmp_randstate_t state;
static bool initialize = true;
if(initialize)
{
gmp_randinit_default(state);
gmp_randseed_ui(state,0);
initialize = false;
}
if(seed != 0) gmp_randseed_ui(state,seed);
return mpfr::urandom(state);
#else
// Fallback for old MPFR: derive a value from std::rand (low quality).
if(seed != 0) std::srand(seed);
return mpfr::mpreal(std::rand()/(double)RAND_MAX);
#endif
}
#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,1,0))
// Gaussian (normal) random number (MPFR >= 3.1); the second output of
// mpfr_grandom is discarded (NULL).
inline const mpreal grandom (gmp_randstate_t& state, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x;
mpfr_grandom(x.mpfr_ptr(), NULL, state, rnd_mode);
return x;
}
// Seeded convenience wrapper; same static-state caveat as random() above.
inline const mpreal grandom(unsigned int seed = 0)
{
static gmp_randstate_t state;
static bool initialize = true;
if(initialize)
{
gmp_randinit_default(state);
gmp_randseed_ui(state,0);
initialize = false;
}
if(seed != 0) gmp_randseed_ui(state,seed);
return mpfr::grandom(state);
}
#endif
//////////////////////////////////////////////////////////////////////////
// Set/Get global properties
// Sets the default precision (in bits) for subsequently created mpreals.
inline void mpreal::set_default_prec(mp_prec_t prec)
{
mpfr_set_default_prec(prec);
}
// Sets the process-global default rounding mode.
inline void mpreal::set_default_rnd(mp_rnd_t rnd_mode)
{
mpfr_set_default_rounding_mode(rnd_mode);
}
// True if x is representable exactly in n bits of mantissa (or is infinite):
// scales the mantissa to n bits and checks the fractional part is zero.
inline bool mpreal::fits_in_bits(double x, int n)
{
int i;
double t;
return IsInf(x) || (std::modf ( std::ldexp ( std::frexp ( x, &i ), n ), &t ) == 0.0);
}
// pow overload family. Strategy throughout: route to the most specific MPFR
// primitive available (mpfr_pow_ui / mpfr_pow_si / mpfr_ui_pow /
// mpfr_ui_pow_ui), falling back to the general mpfr_pow via mpreal
// conversion when an operand is negative or floating-point. The result
// inherits the mpreal operand's precision (copy-constructed from it).
inline const mpreal pow(const mpreal& a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x(a);
mpfr_pow(x.mp,x.mp,b.mp,rnd_mode);
return x;
}
inline const mpreal pow(const mpreal& a, const mpz_t b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x(a);
mpfr_pow_z(x.mp,x.mp,b,rnd_mode);
return x;
}
inline const mpreal pow(const mpreal& a, const unsigned long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x(a);
mpfr_pow_ui(x.mp,x.mp,b,rnd_mode);
return x;
}
inline const mpreal pow(const mpreal& a, const unsigned int b, mp_rnd_t rnd_mode)
{
return pow(a,static_cast<unsigned long int>(b),rnd_mode);
}
inline const mpreal pow(const mpreal& a, const long int b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x(a);
mpfr_pow_si(x.mp,x.mp,b,rnd_mode);
return x;
}
inline const mpreal pow(const mpreal& a, const int b, mp_rnd_t rnd_mode)
{
return pow(a,static_cast<long int>(b),rnd_mode);
}
inline const mpreal pow(const mpreal& a, const long double b, mp_rnd_t rnd_mode)
{
return pow(a,mpreal(b),rnd_mode);
}
inline const mpreal pow(const mpreal& a, const double b, mp_rnd_t rnd_mode)
{
return pow(a,mpreal(b),rnd_mode);
}
// Integer base, mpreal exponent.
inline const mpreal pow(const unsigned long int a, const mpreal& b, mp_rnd_t rnd_mode = mpreal::get_default_rnd())
{
mpreal x(a);
mpfr_ui_pow(x.mp,a,b.mp,rnd_mode);
return x;
}
inline const mpreal pow(const unsigned int a, const mpreal& b, mp_rnd_t rnd_mode)
{
return pow(static_cast<unsigned long int>(a),b,rnd_mode);
}
inline const mpreal pow(const long int a, const mpreal& b, mp_rnd_t rnd_mode)
{
if (a>=0) return pow(static_cast<unsigned long int>(a),b,rnd_mode);
else return pow(mpreal(a),b,rnd_mode);
}
inline const mpreal pow(const int a, const mpreal& b, mp_rnd_t rnd_mode)
{
if (a>=0) return pow(static_cast<unsigned long int>(a),b,rnd_mode);
else return pow(mpreal(a),b,rnd_mode);
}
inline const mpreal pow(const long double a, const mpreal& b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),b,rnd_mode);
}
inline const mpreal pow(const double a, const mpreal& b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),b,rnd_mode);
}
// pow unsigned long int
inline const mpreal pow(const unsigned long int a, const unsigned long int b, mp_rnd_t rnd_mode)
{
mpreal x(a);
mpfr_ui_pow_ui(x.mp,a,b,rnd_mode);
return x;
}
inline const mpreal pow(const unsigned long int a, const unsigned int b, mp_rnd_t rnd_mode)
{
return pow(a,static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
}
inline const mpreal pow(const unsigned long int a, const long int b, mp_rnd_t rnd_mode)
{
if(b>0) return pow(a,static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
}
inline const mpreal pow(const unsigned long int a, const int b, mp_rnd_t rnd_mode)
{
if(b>0) return pow(a,static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
}
inline const mpreal pow(const unsigned long int a, const long double b, mp_rnd_t rnd_mode)
{
return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
}
inline const mpreal pow(const unsigned long int a, const double b, mp_rnd_t rnd_mode)
{
return pow(a,mpreal(b),rnd_mode); //mpfr_ui_pow
}
// pow unsigned int
inline const mpreal pow(const unsigned int a, const unsigned long int b, mp_rnd_t rnd_mode)
{
return pow(static_cast<unsigned long int>(a),b,rnd_mode); //mpfr_ui_pow_ui
}
inline const mpreal pow(const unsigned int a, const unsigned int b, mp_rnd_t rnd_mode)
{
return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
}
inline const mpreal pow(const unsigned int a, const long int b, mp_rnd_t rnd_mode)
{
if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
}
inline const mpreal pow(const unsigned int a, const int b, mp_rnd_t rnd_mode)
{
if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
}
inline const mpreal pow(const unsigned int a, const long double b, mp_rnd_t rnd_mode)
{
return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
}
inline const mpreal pow(const unsigned int a, const double b, mp_rnd_t rnd_mode)
{
return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
}
// pow long int
inline const mpreal pow(const long int a, const unsigned long int b, mp_rnd_t rnd_mode)
{
if (a>0) return pow(static_cast<unsigned long int>(a),b,rnd_mode); //mpfr_ui_pow_ui
else return pow(mpreal(a),b,rnd_mode); //mpfr_pow_ui
}
inline const mpreal pow(const long int a, const unsigned int b, mp_rnd_t rnd_mode)
{
if (a>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(mpreal(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_pow_ui
}
inline const mpreal pow(const long int a, const long int b, mp_rnd_t rnd_mode)
{
if (a>0)
{
if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
}else{
return pow(mpreal(a),b,rnd_mode); // mpfr_pow_si
}
}
inline const mpreal pow(const long int a, const int b, mp_rnd_t rnd_mode)
{
if (a>0)
{
if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
}else{
return pow(mpreal(a),static_cast<long int>(b),rnd_mode); // mpfr_pow_si
}
}
inline const mpreal pow(const long int a, const long double b, mp_rnd_t rnd_mode)
{
if (a>=0) return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
}
inline const mpreal pow(const long int a, const double b, mp_rnd_t rnd_mode)
{
if (a>=0) return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
}
// pow int
inline const mpreal pow(const int a, const unsigned long int b, mp_rnd_t rnd_mode)
{
if (a>0) return pow(static_cast<unsigned long int>(a),b,rnd_mode); //mpfr_ui_pow_ui
else return pow(mpreal(a),b,rnd_mode); //mpfr_pow_ui
}
inline const mpreal pow(const int a, const unsigned int b, mp_rnd_t rnd_mode)
{
if (a>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(mpreal(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_pow_ui
}
inline const mpreal pow(const int a, const long int b, mp_rnd_t rnd_mode)
{
if (a>0)
{
if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
}else{
return pow(mpreal(a),b,rnd_mode); // mpfr_pow_si
}
}
inline const mpreal pow(const int a, const int b, mp_rnd_t rnd_mode)
{
if (a>0)
{
if(b>0) return pow(static_cast<unsigned long int>(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_ui_pow_ui
else return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
}else{
return pow(mpreal(a),static_cast<long int>(b),rnd_mode); // mpfr_pow_si
}
}
inline const mpreal pow(const int a, const long double b, mp_rnd_t rnd_mode)
{
if (a>=0) return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
}
inline const mpreal pow(const int a, const double b, mp_rnd_t rnd_mode)
{
if (a>=0) return pow(static_cast<unsigned long int>(a),mpreal(b),rnd_mode); //mpfr_ui_pow
else return pow(mpreal(a),mpreal(b),rnd_mode); //mpfr_pow
}
// pow long double
inline const mpreal pow(const long double a, const long double b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),mpreal(b),rnd_mode);
}
inline const mpreal pow(const long double a, const unsigned long int b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),b,rnd_mode); //mpfr_pow_ui
}
inline const mpreal pow(const long double a, const unsigned int b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),static_cast<unsigned long int>(b),rnd_mode); //mpfr_pow_ui
}
inline const mpreal pow(const long double a, const long int b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),b,rnd_mode); // mpfr_pow_si
}
inline const mpreal pow(const long double a, const int b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),static_cast<long int>(b),rnd_mode); // mpfr_pow_si
}
// pow double
inline const mpreal pow(const double a, const double b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),mpreal(b),rnd_mode);
}
inline const mpreal pow(const double a, const unsigned long int b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),b,rnd_mode); // mpfr_pow_ui
}
inline const mpreal pow(const double a, const unsigned int b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),static_cast<unsigned long int>(b),rnd_mode); // mpfr_pow_ui
}
inline const mpreal pow(const double a, const long int b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),b,rnd_mode); // mpfr_pow_si
}
inline const mpreal pow(const double a, const int b, mp_rnd_t rnd_mode)
{
return pow(mpreal(a),static_cast<long int>(b),rnd_mode); // mpfr_pow_si
}
} // End of mpfr namespace
// Explicit specialization of std::swap for mpreal numbers
// Thus standard algorithms will use efficient version of swap (due to Koenig lookup)
// Non-throwing swap C++ idiom: http://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Non-throwing_swap
namespace std
{
// we are allowed to extend namespace std with specializations only
// O(1) swap of the underlying MPFR handles.
template <>
inline void swap(mpfr::mpreal& x, mpfr::mpreal& y)
{
return mpfr::swap(x, y);
}
// numeric_limits specialization for mpreal. Unlike built-in types, mpreal's
// precision and exponent range are runtime-configurable, so several members
// here take an optional precision argument (a deliberate extension of the
// standard interface).
template<>
class numeric_limits<mpfr::mpreal>
{
public:
static const bool is_specialized = true;
static const bool is_signed = true;
static const bool is_integer = false;
static const bool is_exact = false;
static const int radix = 2;
static const bool has_infinity = true;
static const bool has_quiet_NaN = true;
static const bool has_signaling_NaN = true;
static const bool is_iec559 = true; // = IEEE 754
static const bool is_bounded = true;
static const bool is_modulo = false;
static const bool traps = true;
static const bool tinyness_before = true;
// NOTE(review): has_denorm = denorm_absent while is_iec559 = true is
// inconsistent with strict IEEE 754 — confirm this is the intended
// characterization of MPFR's number model.
static const float_denorm_style has_denorm = denorm_absent;
// Smallest positive / largest finite / most negative values at the given
// precision (names parenthesized to survive min/max macros).
inline static mpfr::mpreal (min) (mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::minval(precision); }
inline static mpfr::mpreal (max) (mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(precision); }
inline static mpfr::mpreal lowest (mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(precision); }
// Returns smallest eps such that 1 + eps != 1 (classic machine epsilon)
inline static mpfr::mpreal epsilon(mp_prec_t precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(precision); }
// Returns smallest eps such that x + eps != x (relative machine epsilon)
inline static mpfr::mpreal epsilon(const mpfr::mpreal& x) { return mpfr::machine_epsilon(x); }
// 0.5 ulp for round-to-nearest, 1 ulp for directed rounding modes.
inline static mpfr::mpreal round_error(mp_prec_t precision = mpfr::mpreal::get_default_prec())
{
mp_rnd_t r = mpfr::mpreal::get_default_rnd();
if(r == GMP_RNDN) return mpfr::mpreal(0.5, precision);
else return mpfr::mpreal(1.0, precision);
}
inline static const mpfr::mpreal infinity() { return mpfr::const_infinity(); }
inline static const mpfr::mpreal quiet_NaN() { return mpfr::mpreal().setNan(); }
inline static const mpfr::mpreal signaling_NaN() { return mpfr::mpreal().setNan(); }
// No subnormals in MPFR's model: denorm_min is just the smallest normal.
inline static const mpfr::mpreal denorm_min() { return (min)(); }
// Please note, exponent range is not fixed in MPFR
static const int min_exponent = MPFR_EMIN_DEFAULT;
static const int max_exponent = MPFR_EMAX_DEFAULT;
// 0.30103... = log10(2), converting binary exponents to decimal.
MPREAL_PERMISSIVE_EXPR static const int min_exponent10 = (int) (MPFR_EMIN_DEFAULT * 0.3010299956639811);
MPREAL_PERMISSIVE_EXPR static const int max_exponent10 = (int) (MPFR_EMAX_DEFAULT * 0.3010299956639811);
#ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS
// Following members should be constant according to standard, but they can be variable in MPFR
// So we define them as functions here.
//
// This is preferable way for std::numeric_limits<mpfr::mpreal> specialization.
// But it is incompatible with standard std::numeric_limits and might not work with other libraries, e.g. boost.
// See below for compatible implementation.
inline static float_round_style round_style()
{
mp_rnd_t r = mpfr::mpreal::get_default_rnd();
switch (r)
{
case GMP_RNDN: return round_to_nearest;
case GMP_RNDZ: return round_toward_zero;
case GMP_RNDU: return round_toward_infinity;
case GMP_RNDD: return round_toward_neg_infinity;
default: return round_indeterminate;
}
}
inline static int digits() { return int(mpfr::mpreal::get_default_prec()); }
inline static int digits(const mpfr::mpreal& x) { return x.getPrecision(); }
inline static int digits10(mp_prec_t precision = mpfr::mpreal::get_default_prec())
{
return mpfr::bits2digits(precision);
}
inline static int digits10(const mpfr::mpreal& x)
{
return mpfr::bits2digits(x.getPrecision());
}
inline static int max_digits10(mp_prec_t precision = mpfr::mpreal::get_default_prec())
{
return digits10(precision);
}
#else
// Digits and round_style are NOT constants when it comes to mpreal.
// If possible, please use functions digits() and round_style() defined above.
//
// These (default) values are preserved for compatibility with existing libraries, e.g. boost.
// Change them accordingly to your application.
//
// For example, if you use 256 bits of precision uniformly in your program, then:
// digits = 256
// digits10 = 77
// max_digits10 = 78
//
// Approximate formula for decimal digits is: digits10 = floor(log10(2) * digits). See bits2digits() for more details.
static const std::float_round_style round_style = round_to_nearest;
static const int digits = 53;
static const int digits10 = 15;
static const int max_digits10 = 16;
#endif
};
}
#endif /* __MPREAL_H__ */
| 115,135 | 36.080837 | 177 | h |
abess | abess-master/python/pytest/test_alg.py | import sys
import abess
import pytest
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.metrics import ndcg_score
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.linear_model import (
LinearRegression,
LogisticRegression,
PoissonRegressor)
from sklearn.utils.estimator_checks import check_estimator
try:
import pandas as pd
from lifelines import CoxPHFitter
miss_dep = False
except ImportError:
miss_dep = True
from utilities import ( # noqa: F401
assert_nan,
assert_value,
assert_fit,
save_data,
load_data)
@pytest.mark.filterwarnings("ignore")
class TestAlgorithm:
"""
Test for each algorithm.
"""
    @staticmethod
    def test_gaussian():
        """Linear regression: fit, intercept handling, predict, score and
        the covariance-update variants against cached 'gaussian' data."""
        np.random.seed(2)
        n = 100
        p = 20
        k = 3
        family = "gaussian"
        rho = 0.1
        data = abess.make_glm_data(family=family, n=n, p=p, k=k, rho=rho)
        test_data = abess.make_glm_data(
            family=family, n=n, p=p, k=k, rho=rho, coef_=data.coef_)
        # save_data(data, 'gaussian')
        # save_data(test_data, 'gaussian_test')
        # Cached fixtures keep the test deterministic across platforms.
        data = load_data("gaussian")
        test_data = load_data("gaussian_test")

        # Refit OLS on the selected support and compare coefficients.
        def assert_reg(coef, fit_intercept=True, rel=0.01, abs=0.01):
            if (sys.version_info[0] < 3 or sys.version_info[1] < 6):
                return
            nonzero = np.nonzero(coef)[0]
            new_x = data.x[:, nonzero]
            reg = LinearRegression(fit_intercept=fit_intercept)
            reg.fit(new_x, data.y.reshape(-1))
            assert_value(coef[nonzero], reg.coef_, rel, abs)
        # null
        check_estimator(abess.LinearRegression())
        model1 = abess.LinearRegression()
        model1.fit(data.x, data.y)
        assert_fit(model1.coef_, data.coef_)
        assert_reg(model1.coef_)
        model0 = abess.LinearRegression(fit_intercept=False)
        model0.fit(data.x, data.y)
        assert model0.intercept_ == 0
        assert_fit(model0.coef_, data.coef_)
        assert_reg(model0.coef_, fit_intercept=False)
        # predict
        y = model1.predict(test_data.x)
        assert_nan(y)
        # score (both call signatures; only the weighted score is asserted)
        score = model1.score(test_data.x, test_data.y)
        sample_weight = np.random.rand(n)
        score = model1.score(test_data.x, test_data.y,
                             sample_weight=sample_weight)
        assert score > 0.5
        # covariance update
        model2 = abess.LinearRegression(covariance_update=True)
        model2.fit(data.x, data.y)
        assert_value(model1.coef_, model2.coef_)
        model3 = abess.LinearRegression(
            covariance_update=True,
            important_search=10,
            screening_size=20,
            cv=5)
        model3.fit(data.x, data.y)
        assert_fit(model3.coef_, data.coef_)
        model4 = abess.LinearRegression(
            covariance_update=True, path_type='gs', cv=5)
        cv_fold_id = np.repeat(np.linspace(1, 5, 5), int(n / 5))
        model4.fit(data.x, data.y, cv_fold_id=cv_fold_id)
        assert_fit(model4.coef_, data.coef_)
    @staticmethod
    def test_binomial():
        """Logistic regression: fit, predict_proba/predict, score and the
        approximate-Newton solver against cached 'binomial' data."""
        np.random.seed(2)
        n = 300
        p = 20
        k = 3
        family = "binomial"
        rho = 0.5
        sigma = 1
        data = abess.make_glm_data(
            family=family,
            n=n,
            p=p,
            k=k,
            rho=rho,
            sigma=sigma)
        test_data = abess.make_glm_data(
            family=family,
            n=n,
            p=p,
            k=k,
            rho=rho,
            sigma=sigma,
            coef_=data.coef_)
        # save_data(data, 'binomial')
        # save_data(test_data, 'binomial_test')
        data = load_data("binomial")
        test_data = load_data("binomial_test")

        # Refit unpenalized sklearn logistic on the selected support.
        def assert_reg(coef, fit_intercept=True, rel=0.01, abs=0.01):
            if sys.version_info[0] + 0.1 * sys.version_info[1] < 3.6:
                return
            nonzero = np.nonzero(coef)[0]
            new_x = data.x[:, nonzero]
            reg = LogisticRegression(
                penalty="none", fit_intercept=fit_intercept)
            reg.fit(new_x, data.y)
            assert_value(coef[nonzero], reg.coef_, rel, abs)
        # null
        check_estimator(abess.LogisticRegression())
        model1 = abess.LogisticRegression()
        model1.fit(data.x, data.y)
        assert_fit(model1.coef_, data.coef_)
        assert_reg(model1.coef_)
        model0 = abess.LogisticRegression(fit_intercept=False)
        model0.fit(data.x, data.y)
        assert model0.intercept_ == 0
        assert_fit(model0.coef_, data.coef_)
        assert_reg(model0.coef_, fit_intercept=False)
        # predict
        prob = model1.predict_proba(test_data.x)
        assert_nan(prob)
        y = model1.predict(test_data.x)
        assert_nan(y)
        # score
        score = model1.score(test_data.x, test_data.y)
        sample_weight = np.random.rand(n)
        score = model1.score(test_data.x, test_data.y,
                             sample_weight=sample_weight)
        assert score > 0.5
        # approximate Newton
        model2 = abess.LogisticRegression(approximate_Newton=True)
        model2.fit(data.x, data.y)
        assert_fit(model1.coef_, model2.coef_)
    @staticmethod
    def test_cox():
        """Cox proportional hazards: fit, predict, score, approximate
        Newton and predicted survival functions on cached 'cox' data."""
        np.random.seed(2)
        n = 200
        p = 20
        k = 3
        family = "cox"
        rho = 0.5
        sigma = 1
        data = abess.make_glm_data(
            n, p, family=family, k=k, rho=rho, sigma=sigma)
        # save_data(data, 'cox')
        data = load_data("cox")

        # Refit lifelines' CoxPHFitter on the selected support and compare.
        # Skipped when pandas/lifelines are unavailable.
        def assert_reg(coef):
            if miss_dep:
                pytest.skip(
                    "Skip because modules 'pandas' or 'lifelines'"
                    " have not been installed.")
            if sys.version_info[0] + 0.1 * sys.version_info[1] < 3.6:
                pytest.skip("Skip because requiring python3.6 or higher.")
            nonzero = np.nonzero(coef)[0]
            new_x = data.x[:, nonzero]
            survival = pd.DataFrame()
            for i in range(new_x.shape[1]):
                survival["Var" + str(i)] = new_x[:, i]
            # y column 0 is the event time, column 1 the event indicator.
            survival["T"] = data.y[:, 0]
            survival["E"] = data.y[:, 1]
            cph = CoxPHFitter(penalizer=0, l1_ratio=0)
            cph.fit(survival, 'T', event_col='E')
            assert_value(coef[nonzero], cph.params_.values, rel=5e-1, abs=5e-1)
        # null
        check_estimator(abess.CoxPHSurvivalAnalysis())
        model1 = abess.CoxPHSurvivalAnalysis()
        model1.fit(data.x, data.y)
        assert_fit(model1.coef_, data.coef_)
        assert_reg(model1.coef_)
        # predict
        y = model1.predict(data.x)
        assert_nan(y)
        # score
        score = model1.score(data.x, data.y)
        sample_weight = np.random.rand(n)
        score = model1.score(data.x, data.y,
                             sample_weight=sample_weight)
        assert not np.isnan(score)
        # approximate Newton
        model2 = abess.CoxPHSurvivalAnalysis(approximate_Newton=True)
        model2.fit(data.x, data.y)
        # assert_fit(model1.coef_, model2.coef_)  # TODO
        assert_reg(model2.coef_)
        # survival function
        surv = model1.predict_survival_function(data.x)
        time_points = np.quantile(data.y[:, 0], np.linspace(0, 0.6, 100))
        surv[0](time_points)
    @staticmethod
    def test_poisson():
        """Poisson regression: fit, predict, score and the approximate
        Newton solver against cached 'poisson' data."""
        np.random.seed(2)
        n = 100
        p = 20
        k = 3
        family = "poisson"
        rho = 0.5
        sigma = 1
        data = abess.make_glm_data(
            n, p, family=family, k=k, rho=rho, sigma=sigma)
        test_data = abess.make_glm_data(
            n, p, family=family, k=k, rho=rho, sigma=sigma, coef_=data.coef_)
        # save_data(data, 'poisson')
        # save_data(test_data, 'poisson_test')
        data = load_data("poisson")
        test_data = load_data("poisson_test")

        # Refit sklearn's unpenalized PoissonRegressor on the support.
        def assert_reg(coef, fit_intercept=True, rel=0.1, abs=0.1):
            if sys.version_info[0] + 0.1 * sys.version_info[1] < 3.6:
                return
            nonzero = np.nonzero(coef)[0]
            new_x = data.x[:, nonzero]
            reg = PoissonRegressor(
                fit_intercept=fit_intercept,
                alpha=0, tol=1e-6, max_iter=200)
            reg.fit(new_x, data.y)
            assert_value(coef[nonzero], reg.coef_, rel, abs)
        # null
        check_estimator(abess.PoissonRegression())
        model1 = abess.PoissonRegression()
        model1.fit(data.x, data.y)
        assert_fit(model1.coef_, data.coef_)
        assert_reg(model1.coef_)
        model0 = abess.PoissonRegression(fit_intercept=False)
        model0.fit(data.x, data.y)
        assert model0.intercept_ == 0
        assert_fit(model0.coef_, data.coef_)
        assert_reg(model0.coef_, fit_intercept=False)
        # predict
        y = model1.predict(test_data.x)
        assert_nan(y)
        # score
        score = model1.score(test_data.x, test_data.y)
        sample_weight = np.random.rand(n)
        score = model1.score(test_data.x, test_data.y,
                             sample_weight=sample_weight)
        assert score > 0.5
        # approximate Newton
        model2 = abess.PoissonRegression(approximate_Newton=True)
        model2.fit(data.x, data.y)
        assert_fit(model1.coef_, model2.coef_)
        assert_reg(model2.coef_)
    @staticmethod
    def test_multigaussian():
        """Multi-task (multivariate gaussian) regression: fit, predict,
        score and covariance-update variants on cached data."""
        np.random.seed(1)
        n = 100
        p = 20
        k = 3
        family = "multigaussian"
        rho = 0.5
        M = 3
        data = abess.make_multivariate_glm_data(
            family=family, n=n, p=p, k=k, rho=rho, M=M)
        test_data = abess.make_multivariate_glm_data(
            family=family, n=n, p=p, k=k, rho=rho, M=M, coef_=data.coef_)
        # save_data(data, "multigaussian")
        # save_data(test_data, "multigaussian_test")
        data = load_data("multigaussian")
        test_data = load_data("multigaussian_test")
        # null
        check_estimator(abess.MultiTaskRegression())
        model1 = abess.MultiTaskRegression()
        model1.fit(data.x, data.y)
        assert_fit(model1.coef_, data.coef_)
        model0 = abess.MultiTaskRegression(fit_intercept=False)
        model0.fit(data.x, data.y)
        assert np.count_nonzero(model0.intercept_) == 0
        assert_fit(model0.coef_, data.coef_)
        # predict
        y = model1.predict(test_data.x)
        assert_nan(y)
        # score
        score = model1.score(test_data.x, test_data.y)
        sample_weight = np.random.rand(n)
        score = model1.score(test_data.x, test_data.y,
                             sample_weight=sample_weight)
        assert score > 0.5
        # covariance update
        model2 = abess.MultiTaskRegression(covariance_update=True)
        model2.fit(data.x, data.y)
        assert_value(model1.coef_, model2.coef_)
        model3 = abess.MultiTaskRegression(
            covariance_update=True,
            important_search=10,
            screening_size=20,
            cv=5)
        model3.fit(data.x, data.y)
        assert_fit(model3.coef_, data.coef_)
        model4 = abess.MultiTaskRegression(
            covariance_update=True, path_type='gs', cv=5)
        cv_fold_id = np.repeat(np.linspace(1, 5, 5), int(n / 5))
        model4.fit(data.x, data.y, cv_fold_id=cv_fold_id)
        assert_fit(model4.coef_, data.coef_)
    @staticmethod
    def test_multinomial():
        """Multinomial regression: fit, predict, score and categorical
        (non one-hot) response handling on cached data."""
        np.random.seed(5)
        n = 100
        p = 20
        k = 3
        family = "multinomial"
        rho = 0.5
        M = 3
        data = abess.make_multivariate_glm_data(
            family=family, n=n, p=p, k=k, rho=rho, M=M)
        test_data = abess.make_multivariate_glm_data(
            family=family, n=n, p=p, k=k, rho=rho, M=M, coef_=data.coef_)
        # save_data(data, 'multinomial')
        # save_data(test_data, 'multinomial_test')
        data = load_data('multinomial')
        test_data = load_data('multinomial_test')
        # null
        check_estimator(abess.MultinomialRegression())
        model1 = abess.MultinomialRegression()
        model1.fit(data.x, data.y)
        assert_fit(model1.coef_, data.coef_)
        model0 = abess.MultinomialRegression(fit_intercept=False)
        model0.fit(data.x, data.y)
        assert np.count_nonzero(model0.intercept_) == 0
        assert_fit(model0.coef_, data.coef_)
        # predict
        y = model1.predict(test_data.x)
        assert_nan(y)
        # score
        score = model1.score(test_data.x, test_data.y)
        sample_weight = np.random.rand(n)
        score = model1.score(test_data.x, test_data.y,
                             sample_weight=sample_weight)
        assert score > 0.5
        # # approximate Newton
        # model2 = abess.MultinomialRegression(approximate_Newton=True)
        # model2.fit(data.x, data.y)
        # assert_fit(model1.coef_, model2.coef_)
        # categorical y (labels, not a one-hot matrix)
        cate_y = np.repeat(np.arange(n / 10), 10)
        model1.fit(data.x, cate_y)
        score = model1.score(data.x, cate_y,
                             sample_weight=sample_weight)
        assert not np.isnan(score)
    @staticmethod
    def test_PCA():
        """Sparse PCA: dense/sparse input, Sigma input, multiple
        components (KPCA), group selection, screening, IC and A_init."""
        np.random.seed(1)
        n = 1000
        p = 20
        s = 10
        group_size = 5
        group_num = 4
        # Column-indicator form of support_size: one sparsity level flagged.
        support_size = np.zeros((p, 1))
        support_size[s - 1, 0] = 1
        x1 = np.random.randn(n, 1)
        x1 /= np.linalg.norm(x1)
        # Rank-one signal plus small noise, then centered.
        X = x1.dot(np.random.randn(1, p)) + 0.01 * np.random.randn(n, p)
        X = X - X.mean(axis=0)
        g_index = np.arange(group_num)
        g_index = g_index.repeat(group_size)
        # save_data(X, 'PCA')
        X = load_data('PCA')
        # null
        check_estimator(abess.SparsePCA())
        model1 = abess.SparsePCA(support_size=support_size)
        model1.fit(X)
        assert np.count_nonzero(model1.coef_) == s
        # ratio & transform
        model1.ratio(X)
        model1.transform(X)
        model1.fit_transform(X)
        # sparse
        model2 = abess.SparsePCA(support_size=s)
        model2.fit(coo_matrix(X), sparse_matrix=True)
        print("coef1: ", np.unique(np.nonzero(model1.coef_)[0]))
        print("coef2: ", np.unique(np.nonzero(model2.coef_)[0]))
        assert_value(model1.coef_, model2.coef_)
        model2 = abess.SparsePCA(support_size=s)
        model2.fit(X, sparse_matrix=True)
        assert_value(model1.coef_, model2.coef_)
        # sigma input
        model3 = abess.SparsePCA(support_size=support_size)
        model3.fit(Sigma=X.T.dot(X))
        model3.fit(Sigma=np.cov(X.T), n=n)
        assert_fit(model1.coef_, model3.coef_)
        # KPCA
        support_size_m = np.hstack((support_size, support_size, support_size))
        model4 = abess.SparsePCA(support_size=support_size_m)
        model4.fit(X, number=3)
        assert model4.coef_.shape[1] == 3
        for i in range(3):
            coef = np.nonzero(model4.coef_[:, i])[0]
            assert len(coef) == s
        model4.ratio(X)
        # group
        support_size_g = np.zeros((4, 1))
        support_size_g[1, 0] = 1
        group = np.repeat([0, 1, 2, 3], [5, 5, 5, 5])
        model5 = abess.SparsePCA(support_size=support_size_g, group=group)
        model5.fit(X)
        coef = g_index[np.nonzero(model5.coef_)[0]]
        assert len(coef) == 10
        assert len(np.unique(coef)) == 2
        # screening
        model6 = abess.SparsePCA(support_size=support_size, screening_size=20)
        model6.fit(X)
        assert_nan(model6.coef_)
        # ic
        for ic in ['loss', 'aic', 'bic', 'ebic', 'gic', 'hic']:
            model = abess.SparsePCA(support_size=support_size, ic_type=ic)
            model.fit(X)
        # A_init
        model = abess.SparsePCA(support_size=support_size, A_init=[0, 1, 2])
        model.fit(X)
    @staticmethod
    def test_gamma():
        """Gamma regression: fit, no-intercept fit, predict and score
        on cached 'gamma' data (looser tolerances than other families)."""
        np.random.seed(0)
        n = 10000
        p = 20
        k = 3
        data = abess.make_glm_data(n=n, p=p, k=k, family="gamma")
        # save_data(data, 'gamma')
        data = load_data('gamma')
        # null
        check_estimator(abess.GammaRegression())
        model1 = abess.GammaRegression(support_size=k)
        model1.fit(data.x, data.y)
        assert_nan(model1.coef_)
        assert_fit(data.coef_, model1.coef_)
        assert_value(data.coef_, model1.coef_, 1., 1.)
        model0 = abess.GammaRegression(support_size=k, fit_intercept=False)
        model0.fit(data.x, data.y)
        assert model0.intercept_ == 0
        assert_nan(model0.coef_)
        # predict
        model1.predict(data.x)
        # score
        score = model1.score(data.x, data.y)
        sample_weight = np.random.rand(n)
        score = model1.score(data.x, data.y,
                             sample_weight=sample_weight)
        assert not np.isnan(score)
    @staticmethod
    def test_RPCA():
        """Robust PCA: dense/sparse input agreement, IC variants and
        always_select on a cached low-rank-plus-sparse matrix."""
        np.random.seed(2)
        n = 100
        p = 20
        s = 30
        r = 5
        # Low-rank component L plus sparse component S with s spikes.
        L = np.random.rand(n, r) @ np.random.rand(r, p)
        nonzero = np.random.choice(n * p, s, replace=False)
        S = np.zeros(n * p)
        S[nonzero] = np.random.rand(s) * 10
        S = S.reshape(p, n).T
        X = L + S
        # save_data(X, 'RPCA')
        X = load_data('RPCA')
        # null
        check_estimator(abess.RobustPCA())
        model1 = abess.RobustPCA(support_size=s)
        model1.fit(X)
        model1.fit(X, r=r)
        # assert_fit(model1.coef_, S)
        # sparse
        model2 = abess.RobustPCA(support_size=s)
        model2.fit(coo_matrix(X), r=r)
        assert_value(model1.coef_, model2.coef_)
        model2 = abess.RobustPCA(support_size=s)
        model2.fit(X, r=r, sparse_matrix=True)
        assert_value(model1.coef_, model2.coef_)
        # # group
        # group = np.arange(n * p)
        # model3 = abess.RobustPCA(support_size=s, group=group)
        # model3.fit(X, r=r)
        # ic
        for ic in ['aic', 'bic', 'ebic', 'gic', 'hic']:
            model4 = abess.RobustPCA(support_size=s, ic_type=ic)
            model4.fit(X, r=r)
        # always select
        model5 = abess.RobustPCA(support_size=s, always_select=[1])
        model5.fit(X, r=r)
    @staticmethod
    def test_ordinal():
        """Ordinal regression: fit and check its ranking quality (NDCG)
        beats a shuffled baseline on cached 'ordinal' data."""
        np.random.seed(2)
        data = abess.make_glm_data(n=100, p=20, k=5, family="ordinal")
        # save_data(data, 'ordinal')
        data = load_data('ordinal')
        # null
        check_estimator(abess.OrdinalRegression())
        model1 = abess.OrdinalRegression()
        model1.fit(data.x, data.y)
        assert_fit(model1.coef_, data.coef_)
        # score
        sample_weight = np.random.rand(100)
        score_ordinal = model1.score(data.x, data.y,
                                     sample_weight=sample_weight)
        score_ordinal = model1.score(data.x, data.y)
        y_random = data.y.copy()
        np.random.shuffle(y_random)
        # Baseline: NDCG of a random permutation of the labels.
        score_random = ndcg_score(data.y.reshape(
            (1, -1)), y_random.reshape((1, -1)))
        assert score_ordinal > score_random
        pred = model1.predict(data.x)
        print((pred != data.y).sum())
        # assert (pred == data.y)
@staticmethod
def test_gaussian_sklearn():
np.random.seed(7)
n = 100
p = 20
k = 3
family = "gaussian"
rho = 0.5
s_max = 20
data = abess.make_glm_data(n, p, family=family, k=k, rho=rho)
# save_data(data, 'gaussian_sklearn')
data = load_data('gaussian_sklearn')
support_size = np.linspace(0, s_max, s_max + 1, dtype="int32")
alpha = [0., 0.1, 0.2, 0.3, 0.4]
try:
model = abess.LinearRegression()
cv = KFold(n_splits=5, shuffle=True, random_state=0)
gcv = GridSearchCV(
model,
param_grid={"support_size": support_size,
"important_search": [10],
"alpha": alpha},
cv=cv,
n_jobs=5).fit(data.x, data.y)
assert gcv.best_params_["support_size"] == k
assert gcv.best_params_["alpha"] == 0.
except BaseException:
assert False
    @staticmethod
    def test_binomial_sklearn():
        """Logistic regression must plug into sklearn's GridSearchCV and
        recover the true sparsity level k with zero regularization."""
        n = 500
        p = 20
        k = 3
        family = "binomial"
        rho = 0.5
        sigma = 1
        np.random.seed(2)
        data = abess.make_glm_data(
            n, p, family=family, k=k, rho=rho, sigma=sigma)
        # data3 = abess.make_multivariate_glm_data(
        #     family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
        # save_data(data, "binomial_sklearn")
        data = load_data("binomial_sklearn")
        s_max = 20
        support_size = np.linspace(0, s_max, s_max + 1, dtype="int32")
        alpha = [0., 0.1, 0.2, 0.3, 0.4]
        model = abess.LogisticRegression()
        cv = KFold(n_splits=5, shuffle=True, random_state=0)
        gcv = GridSearchCV(
            model,
            param_grid={"support_size": support_size,
                        "important_search": [10],
                        "alpha": alpha},
            cv=cv,
            n_jobs=5).fit(data.x, data.y)
        assert gcv.best_params_["support_size"] == k
        assert gcv.best_params_["alpha"] == 0.
    @staticmethod
    def test_poisson_sklearn():
        """Poisson regression must plug into sklearn's GridSearchCV and
        recover the true sparsity level k."""
        n = 100
        p = 20
        k = 3
        family = "poisson"
        rho = 0.5
        # sigma = 1
        # M = 1
        np.random.seed(3)
        data = abess.make_glm_data(n, p, family=family, k=k, rho=rho)
        # data3 = abess.make_multivariate_glm_data(
        #     family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
        # save_data(data, "poisson_sklearn")
        data = load_data("poisson_sklearn")
        s_max = 20
        support_size = np.linspace(0, s_max, s_max + 1, dtype="int32")
        alpha = [0., 0.1, 0.2, 0.3, 0.4]
        model = abess.PoissonRegression()
        cv = KFold(n_splits=5, shuffle=True, random_state=0)
        gcv = GridSearchCV(
            model,
            param_grid={"support_size": support_size,
                        "important_search": [10],
                        "alpha": alpha},
            cv=cv,
            n_jobs=1).fit(data.x, data.y)
        assert gcv.best_params_["support_size"] == k
        # assert gcv.best_params_["alpha"] == 0.
    @ staticmethod
    def test_cox_sklearn():
        """Cox model (with many non-default constructor options) must plug
        into sklearn's GridSearchCV and recover the true sparsity level k."""
        n = 100
        p = 20
        k = 3
        family = "cox"
        rho = 0.5
        # sigma = 1
        # M = 1
        np.random.seed(1)
        data = abess.make_glm_data(n, p, family=family, k=k, rho=rho)
        # data3 = abess.make_multivariate_glm_data(
        #     family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
        # save_data(data, "cox_sklearn")
        data = load_data("cox_sklearn")
        s_max = 10
        support_size = np.linspace(1, s_max, s_max + 1, dtype="int32")
        alpha = [0., 0.1, 0.2, 0.3]
        # Constructor options below are overridden by param_grid where they
        # overlap; they also exercise get_params/set_params round-tripping.
        model = abess.CoxPHSurvivalAnalysis(
            path_type="seq", support_size=support_size,
            ic_type='ebic', screening_size=20,
            s_min=1, s_max=p, cv=5,
            exchange_num=2,
            primary_model_fit_max_iter=30, primary_model_fit_epsilon=1e-6,
            approximate_Newton=True, ic_coef=1., thread=5)
        cv = KFold(n_splits=5, shuffle=True, random_state=0)
        gcv = GridSearchCV(
            model,
            param_grid={"support_size": support_size,
                        "important_search": [10],
                        "alpha": alpha},
            cv=cv,
            n_jobs=1).fit(data.x, data.y)
        assert gcv.best_params_["support_size"] == k
        assert gcv.best_params_["alpha"] == 0.
# @staticmethod
# def test_multigaussian_sklearn():
# n = 100
# p = 20
# k = 3
# family = "multigaussian"
# rho = 0.5
# sigma = 1
# M = 1
# np.random.seed(2)
# data = abess.make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M)
# # data3 = abess.make_multivariate_glm_data(
# # family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
# s_max = 20
# support_size = np.linspace(1, s_max, s_max+1)
# alpha = [0., 0.1, 0.2, 0.3, 0.4]
# model = abess.MultiTaskRegression()
# cv = KFold(n_splits=5, shuffle=True, random_state=0)
# gcv = GridSearchCV(
# model,
# param_grid={"support_size": support_size,
# "alpha": alpha},
# cv=cv,
# n_jobs=1).fit(data.x, data.y)
# assert gcv.best_params_["support_size"] == k
# assert gcv.best_params_["alpha"] == 0.
# @staticmethod
# def test_multinomial_sklearn():
# n = 100
# p = 20
# k = 3
# family = "multinomial"
# rho = 0.5
# sigma = 1
# M = 1
# np.random.seed(2)
# data = abess.make_multivariate_glm_data(
# family=family, n=n, p=p, k=k, rho=rho, M=M)
# # data3 = abess.make_multivariate_glm_data(
# # family=family, n=n, p=p, k=k, rho=rho, M=M, sparse_ratio=0.1)
# s_max = 20
# support_size = np.linspace(0, s_max, s_max+1, dtype = "int32")
# alpha = [0., 0.1, 0.2, 0.3, 0.4]
# model = abess.MultinomialRegression()
# cv = KFold(n_splits=5, shuffle=True, random_state=0)
# gcv = GridSearchCV(
# model,
# param_grid={"support_size": support_size,
# "alpha": alpha},
# cv=cv,
# n_jobs=1).fit(data.x, data.y)
# assert gcv.best_params_["support_size"] == k
# assert gcv.best_params_["alpha"] == 0.
| 25,928 | 30.892989 | 79 | py |
abess | abess-master/python/pytest/test_check.py | import abess
import numpy as np
import pytest
@pytest.mark.filterwarnings("ignore")
class TestCheck:
"""
Test for argument error, which should be recognized before the algorithm.
"""
@staticmethod
def test_base():
# path
try:
model = abess.LinearRegression(path_type='other')
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression(support_size=[3])
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression(path_type='gs', s_min=1, s_max=0)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
# ic
try:
model = abess.LinearRegression(ic_type='other')
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression(cv=2, cv_score='other')
model.fit([[1], [2]], [1, 2])
except ValueError as e:
print(e)
else:
assert False
# exchange_num
try:
model = abess.LinearRegression(exchange_num=-1)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
# screening_size
try:
model = abess.LinearRegression(screening_size=3)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression(support_size=[2],
screening_size=1)
model.fit([[1, 2, 3]], [1])
except ValueError as e:
print(e)
else:
assert False
# primary_fit_xxx
try:
model = abess.LogisticRegression(primary_model_fit_max_iter=0.5)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LogisticRegression(primary_model_fit_epsilon=-1)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LogisticRegression(primary_model_fit_epsilon=-1)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
# thread
try:
model = abess.LinearRegression(thread=-1)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
# splicing_type
try:
model = abess.LinearRegression(splicing_type=-1)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
# cv & cv_fold_id
try:
model = abess.LinearRegression(cv=2)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression(cv=2)
cv_fold_id = [[1], [2]]
model.fit([[1], [2]], [1, 2], cv_fold_id=cv_fold_id)
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression(cv=2)
cv_fold_id = [1, 1]
model.fit([[1], [1]], [1, 1], cv_fold_id=cv_fold_id)
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression(cv=2)
cv_fold_id = [1, 2, 1]
model.fit([[1], [1]], [1, 1], cv_fold_id=cv_fold_id)
except ValueError as e:
print(e)
else:
assert False
model = abess.LinearRegression()
# datatype error
try:
model.fit([['c', 1, 1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model.fit([[1, 1, 1]], [1], sample_weight=['c'])
except ValueError as e:
print(e)
else:
assert False
try:
model1 = abess.LinearRegression(cv='c')
model1.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
# A_init
try:
model = abess.LinearRegression(A_init=[[0]])
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression(A_init=[2])
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
# imp search
try:
model = abess.LinearRegression(important_search=-1)
model.fit([[1]], [1])
except ValueError as e:
print(e)
else:
assert False
# incompatible shape
try:
model.fit([1, 1, 1], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model.fit([[1, 1, 1]], [1, 2])
except ValueError as e:
print(e)
else:
assert False
try:
model.fit([[1, 1, 1]], [1])
model.predict([[1, 1]])
except ValueError as e:
print(e)
else:
assert False
try:
model.fit([[1, 1, 1]], [1], sample_weight=[1, 2])
except ValueError as e:
print(e)
else:
assert False
try:
model.fit([[1, 1, 1]], [1], sample_weight=[[1, 2, 3]])
except ValueError as e:
print(e)
else:
assert False
try:
model1 = abess.LinearRegression(group=[1])
model1.fit([[1, 1, 1]], [1])
except ValueError as e:
print(e)
else:
assert False
try:
model1 = abess.LinearRegression(group=[[1, 2, 3]])
model1.fit([[1, 1, 1]], [1])
except ValueError as e:
print(e)
else:
assert False
# new data
try:
data = abess.make_glm_data(n=100, p=10, k=3, family='gamma')
model = abess.GammaRegression()
model.fit(data.x, data.y)
model.score(data.x, data.y, [[1]])
except ValueError as e:
print(e)
else:
assert False
try:
data = abess.make_glm_data(n=100, p=10, k=3, family='gamma')
model = abess.GammaRegression()
model.fit(data.x, data.y)
model.score(data.x, data.y, [1])
except ValueError as e:
print(e)
else:
assert False
try:
data = abess.make_glm_data(n=100, p=10, k=3, family='gaussian')
model = abess.LinearRegression()
model.fit(data.x, data.y)
model.score(data.x[:, 1:], data.y)
except ValueError as e:
print(e)
else:
assert False
try:
data = abess.make_glm_data(
n=100, p=10, k=3, family='gaussian', corr_type="null")
except ValueError as e:
print(e)
else:
assert False
try:
data = abess.make_multivariate_glm_data(
n=100, p=10, k=3, family='gaussian', corr_type="null")
except ValueError as e:
print(e)
else:
assert False
# lack of necessary parameter
try:
model = abess.LinearRegression()
model.fit(X=[[1]])
except ValueError as e:
print(e)
else:
assert False
try:
model = abess.LinearRegression()
model.fit(y=[1])
except ValueError as e:
print(e)
else:
assert False
# constant column
try:
model = abess.LinearRegression()
model.fit(X=[[1, 1], [1, 2]], y=[1, 2])
except OverflowError as e:
print(e)
else:
assert False
    @staticmethod
    def test_pca():
        """
        For `abess.decomposition.SparsePCA.fit`.
        """
        # Each try/except/else block asserts that an invalid argument
        # raises ValueError; reaching `else` means it was wrongly accepted.
        model = abess.SparsePCA()
        data = np.random.randn(100, 10)
        # datatype error
        try:
            model.fit([['c']])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model.fit(Sigma=[['c']])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model.fit(Sigma=[[np.nan]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(cv='c')
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        # incompatible shape
        try:
            model.fit([1])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model.fit(Sigma=[1])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(group=[1, 2])
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(group=[[1]])
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(support_size=np.array([1, 2]))
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model = abess.SparsePCA(screening_size=np.ones((100, 1)))
            model.fit(data)
        except ValueError as e:
            print(e)
        else:
            assert False
        # screening_size
        model = abess.SparsePCA(screening_size=0)
        model.fit(data)
        try:
            model = abess.SparsePCA(screening_size=100)
            model.fit(data)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model = abess.SparsePCA(screening_size=1, support_size=2)
            model.fit(data)
        except ValueError as e:
            print(e)
        else:
            assert False
        # lack of necessary parameter
        try:
            model.fit()
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(cv=5)
            model1.fit(Sigma=[[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        # number
        try:
            model.fit([[1]], number=-1)
        except ValueError as e:
            print(e)
        else:
            assert False
        # invalid sigma (asymmetric, then negative-definite)
        try:
            model.fit(Sigma=[[1, 0], [1, 0]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model.fit(Sigma=[[-1, 0], [0, -1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        # A_init
        try:
            model1 = abess.SparsePCA(A_init=[[0]])
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(A_init=[2])
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        # invalid arg
        try:
            model1 = abess.SparsePCA(ic_type='other')
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(cv=2, cv_score='other')
            model1.fit([[1], [2]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(cv=5)
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(exchange_num=-1)
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(thread=-1)
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(A_init=[[0, 1, 2]])
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.SparsePCA(A_init=[-1])
            model1.fit([[1]])
        except ValueError as e:
            print(e)
        else:
            assert False
    @staticmethod
    def test_rpca():
        """Argument validation for `abess.decomposition.RobustPCA.fit`:
        invalid dtypes, shapes, A_init and constructor options must raise
        ValueError; a fit without `r` must still succeed."""
        model = abess.RobustPCA()
        # datatype error
        try:
            model.fit([['c']], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model.fit([[1]], r='c')
        except ValueError as e:
            print(e)
        else:
            assert False
        # incompatible shape
        try:
            model.fit([1], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model.fit(1, r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        # A_init
        try:
            model1 = abess.RobustPCA(A_init=[[0]])
            model1.fit([[1]], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.RobustPCA(A_init=[2])
            model1.fit([[1]], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        # invalid arg
        # try:
        #     model1 = abess.RobustPCA(group=[1, 2])
        #     model1.fit([[1]], r=1)
        # except ValueError as e:
        #     print(e)
        # else:
        #     assert False
        try:
            model1 = abess.RobustPCA(ic_type='other')
            model1.fit([[1]], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.RobustPCA(support_size=[100])
            model1.fit([[1]], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.RobustPCA()
            model1.fit([[1]], r=0.1)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.RobustPCA(exchange_num=-1)
            model1.fit([[1]], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.RobustPCA(splicing_type=-1)
            model1.fit([[1]], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            model1 = abess.RobustPCA(thread=-1)
            model1.fit([[1]], r=1)
        except ValueError as e:
            print(e)
        else:
            assert False
        # fitting without an explicit rank must not raise
        model1 = abess.RobustPCA()
        model1.fit([[1]])
| 16,095 | 23.240964 | 77 | py |
abess | abess-master/python/pytest/test_dataset.py | import pytest
import numpy as np
import abess
from utilities import assert_shape
@pytest.mark.filterwarnings("ignore")
class TestOther:
"""
Test for other modules in abess package.
Include: `abess.datasets`
"""
@staticmethod
def test_glm():
np.random.seed(123)
n = 100
p = 20
k = 5
# M = 3
rho = 0.
sigma = 1.
for family in ['gaussian', 'binomial', 'poisson', 'gamma', 'ordinal']:
data1 = abess.make_glm_data(
n=n,
p=p,
k=k,
family=family,
rho=rho,
sigma=sigma,
snr=0)
data1 = abess.make_glm_data(
n=n, p=p, k=k, family=family, rho=rho, sigma=sigma)
assert_shape(data1.x, data1.y, n, p, 1)
data2 = abess.make_glm_data(
n=n,
p=p,
k=k,
family=family,
rho=rho,
sigma=sigma,
coef_=data1.coef_)
assert (data1.coef_ == data2.coef_).all()
data3 = abess.make_glm_data(
n=n,
p=p,
k=k,
family=family,
rho=rho,
sigma=sigma,
corr_type="exp")
assert_shape(data3.x, data3.y, n, p, 1)
for family in ['cox']:
data1 = abess.make_glm_data(
n=n,
p=p,
k=k,
family=family,
rho=rho,
sigma=sigma,
censoring=False)
data1 = abess.make_glm_data(
n=n, p=p, k=k, family=family, rho=rho, sigma=sigma)
assert_shape(data1.x, data1.y, n, p, 2)
data2 = abess.make_glm_data(
n=n,
p=p,
k=k,
family=family,
rho=rho,
sigma=sigma,
coef_=data1.coef_)
assert (data1.coef_ == data2.coef_).all()
data3 = abess.make_glm_data(
n=n,
p=p,
k=k,
family=family,
rho=rho,
sigma=sigma,
corr_type="exp")
assert_shape(data3.x, data3.y, n, p, 1)
    @staticmethod
    def test_multi_glm():
        """`make_multivariate_glm_data` must produce correctly shaped data
        for multivariate families, honor `coef_`, support corr_type="exp",
        and reject an unknown family name."""
        np.random.seed(123)
        n = 100
        p = 20
        k = 5
        M = 3
        rho = 0.
        # sigma = 1.
        for family in ['multigaussian', 'multinomial']:
            # First call exercises the sparse_ratio code path; the result
            # is intentionally discarded and regenerated with defaults.
            data1 = abess.make_multivariate_glm_data(
                n=n, p=p, k=k, family=family, rho=rho, M=M, sparse_ratio=0.1)
            data1 = abess.make_multivariate_glm_data(
                n=n, p=p, k=k, family=family, rho=rho, M=M)
            assert_shape(data1.x, data1.y, n, p, M)
            data2 = abess.make_multivariate_glm_data(
                n=n, p=p, k=k, family=family, rho=rho, M=M, coef_=data1.coef_)
            assert (data1.coef_ == data2.coef_).all()
            data3 = abess.make_multivariate_glm_data(
                n=n, p=p, k=k, family=family, rho=rho, M=M, corr_type="exp")
            assert_shape(data3.x, data3.y, n, p, M)
        # 'poisson' is univariate even through the multivariate factory.
        data1 = abess.make_multivariate_glm_data(
            n=n, p=p, k=k, family='poisson', rho=rho, M=M)
        assert_shape(data1.x, data1.y, n, p, 1)
        # error input
        try:
            abess.make_glm_data(n=n, p=p, k=k, family='other')
        except ValueError as e:
            print(e)
        else:
            assert False
        try:
            abess.make_multivariate_glm_data(n=n, p=p, k=k, family='other')
        except ValueError as e:
            print(e)
        else:
            assert False
| 3,791 | 28.858268 | 78 | py |
abess | abess-master/python/pytest/test_deprecated.py | import abess
import pytest
@pytest.mark.filterwarnings("ignore")
class TestDeprecated:
"""
Test for (future) deprecated modules in abess package.
"""
@staticmethod
def test_linear():
abess.abessLm()
abess.abessLogistic()
abess.abessPoisson()
abess.abessCox()
abess.abessGamma()
abess.abessMultigaussian()
abess.abessMultinomial()
@staticmethod
def test_pca():
abess.abessPCA()
abess.abessRPCA()
@pytest.mark.filterwarnings("error::FutureWarning")
class TestDeprecatedWarning:
"""
Test for (future) deprecated modules' warnings in abess package.
"""
@staticmethod
def test_warning():
try:
abess.abessLm()
except FutureWarning as e:
print(e)
else:
assert False
try:
abess.abessLogistic()
except FutureWarning as e:
print(e)
else:
assert False
try:
abess.abessPoisson()
except FutureWarning as e:
print(e)
else:
assert False
try:
abess.abessCox()
except FutureWarning as e:
print(e)
else:
assert False
try:
abess.abessGamma()
except FutureWarning as e:
print(e)
else:
assert False
try:
abess.abessMultigaussian()
except FutureWarning as e:
print(e)
else:
assert False
try:
abess.abessMultinomial()
except FutureWarning as e:
print(e)
else:
assert False
try:
abess.abessPCA()
except FutureWarning as e:
print(e)
else:
assert False
try:
abess.abessRPCA()
except FutureWarning as e:
print(e)
else:
assert False
| 1,979 | 19.625 | 68 | py |
abess | abess-master/python/pytest/test_flow.py | from time import time
import pytest
import numpy as np
from utilities import (assert_nan, assert_value, assert_fit)
from scipy.sparse import coo_matrix
import abess
@pytest.mark.filterwarnings("ignore")
class TestWorkflow:
"""
Test for abess workflow in cpp. (Take `LinearRegression` as an example.)
"""
@staticmethod
def test_sparse():
np.random.seed(0)
n = 100
p = 20
k = 5
# glm
data = abess.make_glm_data(n=n, p=p, k=k, family='gaussian')
model1 = abess.LinearRegression()
model1.fit(data.x, data.y)
model2 = abess.LinearRegression()
model2.fit(coo_matrix(data.x), data.y, sparse_matrix=True)
assert_value(model1.coef_, model2.coef_)
assert_value(model1.intercept_, model2.intercept_)
model3 = abess.LinearRegression()
model3.fit(data.x, data.y, sparse_matrix=True)
assert_value(model1.coef_, model3.coef_)
assert_value(model1.intercept_, model2.intercept_)
# pca
data_pca = np.random.randn(n, p)
model1 = abess.SparsePCA()
model1.fit(data_pca)
model2 = abess.SparsePCA()
model2.fit(coo_matrix(data_pca), sparse_matrix=True)
assert_value(model1.coef_, model2.coef_)
model3 = abess.SparsePCA()
model3.fit(data_pca, sparse_matrix=True)
assert_value(model1.coef_, model3.coef_)
@staticmethod
def test_path():
np.random.seed(0)
n = 100
p = 20
k = 5
s_min = 0
s_max = 10
data = abess.make_glm_data(n=n, p=p, k=k, family='gaussian')
# null
model1 = abess.LinearRegression(
path_type='seq', support_size=range(s_max))
model1.fit(data.x, data.y)
model2 = abess.LinearRegression(
path_type='gs', s_min=s_min, s_max=s_max)
model2.fit(data.x, data.y)
assert_fit(model1.coef_, model2.coef_)
# cv
t1 = time()
model1 = abess.LinearRegression(
path_type='seq', support_size=range(s_max), cv=5)
model1.fit(data.x, data.y)
model2 = abess.LinearRegression(
path_type='gs', s_min=s_min, s_max=s_max, cv=5)
model2.fit(data.x, data.y)
t1 = time() - t1
assert_fit(model1.coef_, model2.coef_)
# thread
t2 = time()
model1 = abess.LinearRegression(
path_type='seq',
support_size=range(s_max),
cv=5,
thread=0)
model1.fit(data.x, data.y)
model2 = abess.LinearRegression(
path_type='gs',
s_min=s_min,
s_max=s_max,
cv=5,
thread=0)
model2.fit(data.x, data.y)
t2 = time() - t2
assert_fit(model1.coef_, model2.coef_)
# assert t2 < t1
# warm_start
model1 = abess.LinearRegression(
path_type='seq',
support_size=range(s_max),
is_warm_start=False)
model1.fit(data.x, data.y)
model2 = abess.LinearRegression(
path_type='gs',
s_min=s_min,
s_max=s_max,
is_warm_start=False)
model2.fit(data.x, data.y)
assert_value(model1.coef_, model2.coef_, 0, 0)
model1 = abess.LinearRegression(
path_type='seq',
support_size=range(s_max),
is_warm_start=False,
cv=5)
model1.fit(data.x, data.y)
model2 = abess.LinearRegression(
path_type='gs',
s_min=s_min,
s_max=s_max,
is_warm_start=False,
cv=5)
model2.fit(data.x, data.y)
assert_value(model1.coef_, model2.coef_, 0, 0)
@staticmethod
def test_normalize():
np.random.seed(0)
n = 100
p = 20
k = 5
# glm
data = abess.make_glm_data(n=n, p=p, k=k, family='gaussian')
model1 = abess.LinearRegression()
model2 = abess.LinearRegression()
model1.fit(data.x, data.y)
model2.fit(data.x, data.y, is_normal=True)
assert_value(model1.coef_, model2.coef_)
assert_value(model1.intercept_, model2.intercept_)
@staticmethod
def test_possible_input():
np.random.seed(2)
n = 100
p = 20
k = 5
M = 3
# s_min = 0
s_max = 10
screen = 15
imp = 5
data = abess.make_glm_data(n=n, p=p, k=k, family='gaussian')
data2 = abess.make_glm_data(n=n, p=p, k=k, family='binomial')
data3 = abess.make_multivariate_glm_data(
n=n, p=p, k=k, M=M, family='multinomial')
# alpha
model = abess.LinearRegression(alpha=[0.1, 0.2, 0.3])
model.fit(data.x, data.y)
assert_nan(model.coef_)
# screening
model = abess.LinearRegression(
support_size=range(s_max),
screening_size=screen)
model.fit(data.x, data.y)
assert_nan(model.coef_)
model = abess.LinearRegression(
support_size=range(s_max),
screening_size=0)
model.fit(data.x, data.y)
assert_nan(model.coef_)
# important search
model = abess.LinearRegression(
support_size=range(s_max),
important_search=imp)
model.fit(data.x, data.y)
assert_nan(model.coef_)
# splicing_type
model1 = abess.LinearRegression(splicing_type=0)
model1.fit(data.x, data.y)
model2 = abess.LinearRegression(splicing_type=1)
model2.fit(data.x, data.y)
assert_fit(model1.coef_, model2.coef_)
# always_select
model = abess.LinearRegression(always_select=[0, 1, 2, 3])
model.fit(data.x, data.y)
assert np.prod(model.coef_[0:4]) != 0
# group
group = np.repeat([1, 2, 3, 4], [5, 5, 5, 5])
model = abess.LinearRegression(support_size=2, group=group)
model.fit(data.x, data.y)
nonzero = np.nonzero(model.coef_)[0]
assert len(nonzero) == 2 * 5
assert len(set(group[nonzero])) == 2
# ic
for ic in ['loss', 'aic', 'bic', 'ebic', 'gic', 'hic']:
model = abess.LinearRegression(ic_type=ic)
model.fit(data.x, data.y)
for cv_score in ['test_loss', 'roc_auc']:
model = abess.LogisticRegression(cv_score=cv_score, cv=5)
model.fit(data2.x, data2.y)
for cv_score in ['test_loss', 'roc_auc_ovo', 'roc_auc_ovr']:
model = abess.MultinomialRegression(cv_score=cv_score, cv=5)
model.fit(data3.x, data3.y)
# A_init
model = abess.LinearRegression(A_init=[0, 1, 2])
model.fit(data.x, data.y)
| 6,759 | 29.727273 | 76 | py |
abess | abess-master/python/pytest/utilities.py | import os
import pickle
import numpy as np
import pytest
CURRENT = os.path.dirname(os.path.abspath(__file__))
def assert_nan(coef):
assert not np.isnan(np.sum(coef))
def assert_value(coef1, coef2, rel=1e-2, abs=1e-2):
coef1 = coef1.reshape(-1)
coef2 = coef2.reshape(-1)
assert coef1.shape == coef2.shape
assert coef1 == pytest.approx(coef2, rel=rel, abs=abs)
def assert_fit(coef1, coef2):
assert_nan(coef1)
assert_nan(coef2)
pos1 = np.unique(np.nonzero(coef1)[0])
pos2 = np.unique(np.nonzero(coef2)[0])
assert pos1.shape == pos2.shape
assert (pos1 == pos2).all()
# assert_value(coef1[pos1], coef2[pos2])
def assert_shape(x, y, n, p, M):
assert x.shape == (n, p)
assert y.shape[0] == n
if M > 1:
assert y.shape[1] == M
def save_data(data, name):
file = CURRENT + '/data/' + name + '.pkl'
with open(file, 'wb') as output:
pickle.dump(data, output, 4)
print("Data saved in " + file)
def load_data(name):
file = CURRENT + '/data/' + name + '.pkl'
with open(file, 'rb') as input:
data = pickle.load(input)
return data
| 1,134 | 22.163265 | 58 | py |
abess | abess-master/python/src/List.cpp | #ifdef R_BUILD
#include <Rcpp.h>
#include <RcppEigen.h>
#else
#include <Eigen/Eigen>
#include "List.h"
#endif
#include <iostream>
#include <typeinfo>
#include <vector>
using namespace std;
using namespace Eigen;
// void List::add(string name, int value)
// {
// // cout<<"value in int add"<<endl;
// std::size_t i;
// for (i = 0; i < vector_int_name.size(); i++)
// {
// if (vector_int_name[i] == name)
// {
// vector_int[i] = value;
// return;
// }
// }
// vector_int.push_back(value);
// vector_int_name.push_back(name);
// }
void List::combine_beta(VectorXd &value) {
std::size_t i;
for (i = 0; i < vector_MatrixXd_name.size(); i++) {
if (vector_MatrixXd_name[i] == "beta") {
MatrixXd beta_new(vector_MatrixXd[i].rows(), vector_MatrixXd[i].cols() + 1);
beta_new << vector_MatrixXd[i], value;
vector_MatrixXd[i] = beta_new;
return;
}
}
for (i = 0; i < vector_VectorXd_name.size(); i++) {
if (vector_VectorXd_name[i] == "beta") {
MatrixXd beta_new(value.size(), 2);
beta_new << vector_VectorXd[i], value;
vector_VectorXd_name[i] = "beta0";
this->add("beta", beta_new);
return;
}
}
}
void List::add(string name, double value) {
// cout<<"value in double add"<<endl;
std::size_t i;
for (i = 0; i < vector_double_name.size(); i++) {
// cout<<"value in get double"<<endl;
if (vector_double_name[i] == name) {
vector_double[i] = value;
return;
}
}
vector_double.push_back(value);
vector_double_name.push_back(name);
}
void List::add(string name, MatrixXd &value) {
std::size_t i;
for (i = 0; i < vector_MatrixXd_name.size(); i++) {
if (vector_MatrixXd_name[i] == name) {
vector_MatrixXd[i] = value;
return;
}
}
vector_MatrixXd.push_back(value);
vector_MatrixXd_name.push_back(name);
}
void List::add(string name, VectorXd &value) {
std::size_t i;
for (i = 0; i < vector_VectorXd_name.size(); i++) {
if (vector_VectorXd_name[i] == name) {
vector_VectorXd[i] = value;
return;
}
}
vector_VectorXd.push_back(value);
vector_VectorXd_name.push_back(name);
}
void List::add(string name, VectorXi &value) {
std::size_t i;
for (i = 0; i < vector_VectorXi_name.size(); i++) {
if (vector_VectorXi_name[i] == name) {
vector_VectorXi[i] = value;
return;
}
}
vector_VectorXi.push_back(value);
vector_VectorXi_name.push_back(name);
}
// void List::add(string name, Eigen::Matrix<VectorXi, Dynamic, Dynamic> &value)
// {
// // cout<<"value in VectorXi add"<<endl;
// std::size_t i;
// for (i = 0; i < vector_Matrix_VectorXi_name.size(); i++)
// {
// // cout<<"value in get VectorXi"<<endl;
// if (vector_Matrix_VectorXi_name[i] == name)
// {
// vector_Matrix_VectorXi[i] = value;
// return;
// }
// }
// vector_Matrix_VectorXi.push_back(value);
// vector_Matrix_VectorXi_name.push_back(name);
// }
// void List::get_value_by_name(string name, Eigen::Matrix<VectorXi, Dynamic, Dynamic> &value)
// {
// std::size_t i;
// for (i = 0; i < vector_Matrix_VectorXi_name.size(); i++)
// {
// // cout<<"value in get VectorXi"<<endl;
// if (vector_Matrix_VectorXi_name[i] == name)
// {
// value = vector_Matrix_VectorXi[i];
// break;
// }
// }
// }
// void List::add(string name, Eigen::Matrix<VectorXd, Dynamic, Dynamic> &value)
// {
// // cout<<"value in VectorXi add"<<endl;
// std::size_t i;
// for (i = 0; i < vector_Matrix_VectorXd_name.size(); i++)
// {
// // cout<<"value in get VectorXi"<<endl;
// if (vector_Matrix_VectorXd_name[i] == name)
// {
// vector_Matrix_VectorXd[i] = value;
// return;
// }
// }
// vector_Matrix_VectorXd.push_back(value);
// vector_Matrix_VectorXd_name.push_back(name);
// }
// void List::get_value_by_name(string name, Eigen::Matrix<VectorXd, Dynamic, Dynamic> &value)
// {
// std::size_t i;
// for (i = 0; i < vector_Matrix_VectorXd_name.size(); i++)
// {
// // cout<<"value in get VectorXi"<<endl;
// if (vector_Matrix_VectorXd_name[i] == name)
// {
// value = vector_Matrix_VectorXd[i];
// break;
// }
// }
// }
// void List::get_value_by_name(string name, int &value)
// {
// std::size_t i;
// for (i = 0; i < vector_int_name.size(); i++)
// {
// // cout<<"value in get double"<<endl;
// if (vector_int_name[i] == name)
// {
// value = vector_int[i];
// break;
// }
// }
// }
void List::get_value_by_name(string name, double &value) {
std::size_t i;
for (i = 0; i < vector_double_name.size(); i++) {
if (vector_double_name[i] == name) {
value = vector_double[i];
break;
}
}
}
void List::get_value_by_name(string name, MatrixXd &value) {
std::size_t i;
for (i = 0; i < vector_MatrixXd_name.size(); i++) {
if (vector_MatrixXd_name[i] == name) {
value = vector_MatrixXd[i];
break;
}
}
}
void List::get_value_by_name(string name, VectorXd &value) {
std::size_t i;
for (i = 0; i < vector_VectorXd_name.size(); i++) {
if (vector_VectorXd_name[i] == name) {
value = vector_VectorXd[i];
break;
}
}
}
// void List::get_value_by_name(string name, VectorXi &value)
// {
// std::size_t i;
// for (i = 0; i < vector_VectorXi_name.size(); i++)
// {
// // cout<<"value in get VectorXi"<<endl;
// if (vector_VectorXi_name[i] == name)
// {
// value = vector_VectorXi[i];
// break;
// }
// }
// }
| 6,118 | 26.075221 | 94 | cpp |
abess | abess-master/python/src/List.h | #ifndef SRC_LIST_H
#define SRC_LIST_H
#include <Eigen/Eigen>
#include <iostream>
#include <vector>
using namespace std;
using namespace Eigen;
class List {
public:
List(){};
~List(){};
// void add(string name, int value);
// void get_value_by_name(string name, int &value);
void add(string name, double value);
void get_value_by_name(string name, double &value);
void add(string name, MatrixXd &value);
void get_value_by_name(string name, MatrixXd &value);
void add(string name, VectorXd &value);
void get_value_by_name(string name, VectorXd &value);
void add(string name, VectorXi &value);
void combine_beta(VectorXd &value);
// void get_value_by_name(string name, VectorXi &value);
// void add(string name, Eigen::Matrix<VectorXd, Dynamic, Dynamic> &value);
// void get_value_by_name(string name, Eigen::Matrix<VectorXd, Dynamic, Dynamic> &value);
// void add(string name, Eigen::Matrix<VectorXi, Dynamic, Dynamic> &value);
// void get_value_by_name(string name, Eigen::Matrix<VectorXi, Dynamic, Dynamic> &value);
private:
vector<int> vector_int;
vector<string> vector_int_name;
vector<double> vector_double;
vector<string> vector_double_name;
vector<Eigen::MatrixXd> vector_MatrixXd;
vector<string> vector_MatrixXd_name;
vector<Eigen::VectorXd> vector_VectorXd;
vector<string> vector_VectorXd_name;
vector<Eigen::VectorXi> vector_VectorXi;
vector<string> vector_VectorXi_name;
vector<Eigen::Matrix<VectorXi, Dynamic, Dynamic>> vector_Matrix_VectorXi;
vector<string> vector_Matrix_VectorXi_name;
vector<Eigen::Matrix<VectorXd, Dynamic, Dynamic>> vector_Matrix_VectorXd;
vector<string> vector_Matrix_VectorXd_name;
};
#endif // SRC_LIST_H
| 1,775 | 36 | 93 | h |
abess | abess-master/python/src/pywrap.cpp | #include <pybind11/eigen.h>
#include <pybind11/pybind11.h>
#include <tuple>
#include "List.h"
#include "api.h"
std::tuple<Eigen::MatrixXd, Eigen::VectorXd, double, double, double> pywrap_GLM(
Eigen::MatrixXd x_Mat, Eigen::MatrixXd y_Mat, Eigen::VectorXd weight_Vec, int n, int p, int normalize_type,
int algorithm_type, int model_type, int max_iter, int exchange_num, int path_type, bool is_warm_start,
int eval_type, double ic_coef, int Kfold, Eigen::VectorXi gindex_Vec, Eigen::VectorXi sequence_Vec,
Eigen::VectorXd lambda_sequence_Vec, Eigen::VectorXi cv_fold_id_Vec, int s_min, int s_max, double lambda_min,
double lambda_max, int n_lambda, int screening_size, Eigen::VectorXi always_select_Vec,
int primary_model_fit_max_iter, double primary_model_fit_epsilon, bool early_stop, bool approximate_Newton,
int thread, bool covariance_update, bool sparse_matrix, int splicing_type, int sub_search,
Eigen::VectorXi A_init_Vec, bool fit_intercept) {
List mylist = abessGLM_API(x_Mat, y_Mat, n, p, normalize_type, weight_Vec, algorithm_type, model_type, max_iter,
exchange_num, path_type, is_warm_start, eval_type, ic_coef, Kfold, sequence_Vec,
lambda_sequence_Vec, s_min, s_max, lambda_min, lambda_max, n_lambda, screening_size,
gindex_Vec, always_select_Vec, primary_model_fit_max_iter, primary_model_fit_epsilon,
early_stop, approximate_Newton, thread, covariance_update, sparse_matrix, splicing_type,
sub_search, cv_fold_id_Vec, A_init_Vec, fit_intercept);
std::tuple<Eigen::MatrixXd, Eigen::VectorXd, double, double, double> output;
int y_col = y_Mat.cols();
if (y_col == 1 && model_type != 5 && model_type != 6) {
Eigen::VectorXd beta;
double coef0 = 0;
double train_loss = 0;
double test_loss = 0;
double ic = 0;
mylist.get_value_by_name("beta", beta);
mylist.get_value_by_name("coef0", coef0);
mylist.get_value_by_name("train_loss", train_loss);
mylist.get_value_by_name("test_loss", test_loss);
mylist.get_value_by_name("ic", ic);
Eigen::MatrixXd beta_out(beta.size(), 1);
beta_out.col(0) = beta;
Eigen::VectorXd coef0_out(1);
coef0_out(0) = coef0;
output = std::make_tuple(beta_out, coef0_out, train_loss, test_loss, ic);
} else {
Eigen::MatrixXd beta;
Eigen::VectorXd coef0;
double train_loss = 0;
double test_loss = 0;
double ic = 0;
mylist.get_value_by_name("beta", beta);
mylist.get_value_by_name("coef0", coef0);
mylist.get_value_by_name("train_loss", train_loss);
mylist.get_value_by_name("test_loss", test_loss);
mylist.get_value_by_name("ic", ic);
output = std::make_tuple(beta, coef0, train_loss, test_loss, ic);
}
return output;
}
std::tuple<Eigen::MatrixXd, double, double, double, double> pywrap_PCA(
Eigen::MatrixXd x_Mat, Eigen::VectorXd weight_Vec, int n, int p, int normalize_type, Eigen::MatrixXd sigma_Mat,
int max_iter, int exchange_num, int path_type, bool is_warm_start, int eval_type, double ic_coef, int Kfold,
Eigen::VectorXi gindex_Vec, Eigen::MatrixXi sequence_Mat, Eigen::VectorXi cv_fold_id_Vec, int s_min, int s_max,
int screening_size, Eigen::VectorXi always_select_Vec, bool early_stop, int thread, bool sparse_matrix,
int splicing_type, int sub_search, int pca_num, Eigen::VectorXi A_init_Vec) {
List mylist = abessPCA_API(x_Mat, n, p, normalize_type, weight_Vec, sigma_Mat, max_iter, exchange_num, path_type,
is_warm_start, eval_type, ic_coef, Kfold, sequence_Mat, s_min, s_max, screening_size,
gindex_Vec, always_select_Vec, early_stop, thread, sparse_matrix, splicing_type,
sub_search, cv_fold_id_Vec, pca_num, A_init_Vec);
Eigen::MatrixXd beta;
if (pca_num == 1) {
Eigen::VectorXd beta_temp;
mylist.get_value_by_name("beta", beta_temp);
// beta.resize(p, 1);
beta = beta_temp;
} else {
// beta.resize(p, pca_num);
mylist.get_value_by_name("beta", beta);
}
double coef0 = 0;
double train_loss = 0;
double test_loss = 0;
double ic = 0;
mylist.get_value_by_name("coef0", coef0);
mylist.get_value_by_name("train_loss", train_loss);
mylist.get_value_by_name("test_loss", test_loss);
mylist.get_value_by_name("ic", ic);
return std::make_tuple(beta, coef0, train_loss, test_loss, ic);
}
std::tuple<Eigen::VectorXd, double, double, double, double> pywrap_RPCA(
Eigen::MatrixXd x_Mat, int n, int p, int normalize_type, int max_iter, int exchange_num, int path_type,
bool is_warm_start, int eval_type, double ic_coef, Eigen::VectorXi gindex_Vec, Eigen::VectorXi sequence_Vec,
Eigen::VectorXd lambda_sequence_Vec, int s_min, int s_max, double lambda_min, double lambda_max, int n_lambda,
int screening_size, Eigen::VectorXi always_select_Vec, int primary_model_fit_max_iter,
double primary_model_fit_epsilon, bool early_stop, int thread, bool sparse_matrix, int splicing_type,
int sub_search, Eigen::VectorXi A_init_Vec) {
List mylist =
abessRPCA_API(x_Mat, n, p, max_iter, exchange_num, path_type, is_warm_start, eval_type, ic_coef, sequence_Vec,
lambda_sequence_Vec, s_min, s_max, lambda_min, lambda_max, n_lambda, screening_size,
primary_model_fit_max_iter, primary_model_fit_epsilon, gindex_Vec, always_select_Vec, early_stop,
thread, sparse_matrix, splicing_type, sub_search, A_init_Vec);
Eigen::VectorXd beta;
double coef0 = 0;
double train_loss = 0;
double test_loss = 0;
double ic = 0;
mylist.get_value_by_name("beta", beta);
mylist.get_value_by_name("coef0", coef0);
mylist.get_value_by_name("train_loss", train_loss);
mylist.get_value_by_name("test_loss", test_loss);
mylist.get_value_by_name("ic", ic);
return std::make_tuple(beta, coef0, train_loss, test_loss, ic);
}
PYBIND11_MODULE(pybind_cabess, m) {
m.def("pywrap_GLM", &pywrap_GLM);
m.def("pywrap_PCA", &pywrap_PCA);
m.def("pywrap_RPCA", &pywrap_RPCA);
}
| 6,368 | 49.149606 | 119 | cpp |
abess | abess-master/src/Algorithm.h | /**
* @file Algorithm.h
* @brief the algorithm for fitting on given parameter.
* @author Jin Zhu (zhuj37@mail2.sysu.edu.cn),
* Kangkang Jiang (jiangkk3@mail2.sysu.edu.cn),
* Junhao Huang (huangjh256@mail2.sysu.edu.cn)
* @version 0.0.1
* @date 2021-07-31
* @copyright GNU General Public License (GPL)
*/
/*****************************************************************************
* OpenST Basic tool library *
* Copyright (C) 2021 Kangkang Jiang jiangkk3@mail2.sysu.edu.cn *
* *
* This file is part of OST. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License version 3 as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License *
* along with OST. If not, see <http://www.gnu.org/licenses/>. *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
*----------------------------------------------------------------------------*
* Remark : Description *
*----------------------------------------------------------------------------*
* Change History : *
* <Date> | <Version> | <Author> | <Description> *
*----------------------------------------------------------------------------*
* 2021/07/31 | 0.0.1 | Kangkang Jiang | First version *
*----------------------------------------------------------------------------*
* *
*****************************************************************************/
#ifndef SRC_ALGORITHM_H
#define SRC_ALGORITHM_H
#ifndef R_BUILD
#include <Eigen/Eigen>
#include <unsupported/Eigen/MatrixFunctions>
#endif
#include <cfloat>
#include <iostream>
#include "utilities.h"
using namespace std;
#define FINAL_FIT_ITER_ADD 20
bool quick_sort_pair_max(std::pair<int, double> x, std::pair<int, double> y);
// <Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd> for Univariate Dense
// <Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double> > for Univariate Sparse
// <Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::MatrixXd> for Multivariable Dense
// <Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::SparseMatrix<double> > for Multivariable Sparse
/**
* @brief Variable select based on splicing algorithm.
* @tparam T1 for y, XTy, XTone
* @tparam T2 for beta
* @tparam T3 for coef0
* @tparam T4 for X
*/
template <class T1, class T2, class T3, class T4>
class Algorithm {
public:
int model_fit_max; // Maximum number of iterations taken for the primary model fitting.
int model_type; // primary model type.
int algorithm_type; // algorithm type.
int group_df = 0; // freedom
int sparsity_level = 0; // Number of non-zero coefficients.
double lambda_level = 0; // l2 normalization coefficients.
// Eigen::VectorXi train_mask;
int max_iter; // Maximum number of iterations taken for the splicing algorithm to converge.
int exchange_num; // Max exchange variable num.
bool warm_start; // When tuning the optimal parameter combination, whether to use the last solution as a warm start
// to accelerate the iterative convergence of the splicing algorithm.
T4 *x = NULL;
T1 *y = NULL;
T2 beta; // coefficients.
Eigen::VectorXd bd; // sacrifices.
T3 coef0; // intercept.
double train_loss = 0.; // train loss.
T2 beta_init; // initialization coefficients.
T3 coef0_init; // initialization intercept.
Eigen::VectorXi A_init; // initialization active set.
Eigen::VectorXi I_init; // initialization inactive set.
Eigen::VectorXd bd_init; // initialization bd vector.
Eigen::VectorXi A_out; // final active set.
Eigen::VectorXi I_out; // final active set.
bool lambda_change; // lambda_change or not.
Eigen::VectorXi always_select; // always select variable.
double tau; // algorithm stop threshold
int primary_model_fit_max_iter; // The maximal number of iteration for primaty model fit
double primary_model_fit_epsilon; // The epsilon (threshold) of iteration for primaty model fit
T2 beta_warmstart; // warmstart beta.
T3 coef0_warmstart; // warmstart intercept.
double effective_number; // effective number of parameter.
int splicing_type; // exchange number update mathod.
int sub_search; // size of sub_searching in splicing
int U_size;
Algorithm() = default;
virtual ~Algorithm(){};
Algorithm(int algorithm_type, int model_type, int max_iter = 100, int primary_model_fit_max_iter = 10,
double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0, int sub_search = 0) {
this->max_iter = max_iter;
this->model_type = model_type;
// this->coef0_init = 0.0;
this->warm_start = warm_start;
this->exchange_num = exchange_num;
this->always_select = always_select;
this->algorithm_type = algorithm_type;
this->primary_model_fit_max_iter = primary_model_fit_max_iter;
this->primary_model_fit_epsilon = primary_model_fit_epsilon;
this->splicing_type = splicing_type;
this->sub_search = sub_search;
};
void set_warm_start(bool warm_start) { this->warm_start = warm_start; }
void update_beta_init(T2 &beta_init) { this->beta_init = beta_init; }
void update_A_init(Eigen::VectorXi &A_init, int g_num) {
this->A_init = A_init;
this->I_init = complement(A_init, g_num);
}
void update_bd_init(Eigen::VectorXd &bd_init) { this->bd_init = bd_init; }
void update_coef0_init(T3 coef0) { this->coef0_init = coef0; }
void update_group_df(int group_df) { this->group_df = group_df; }
void update_sparsity_level(int sparsity_level) { this->sparsity_level = sparsity_level; }
void update_lambda_level(double lambda_level) {
this->lambda_change = this->lambda_level != lambda_level;
this->lambda_level = lambda_level;
}
void update_train_mask(Eigen::VectorXi &train_mask) { this->train_mask = train_mask; }
void update_exchange_num(int exchange_num) { this->exchange_num = exchange_num; }
virtual void update_tau(int train_n, int N) {
if (train_n == 1) {
this->tau = 0.0;
} else {
this->tau =
0.01 * (double)this->sparsity_level * log((double)N) * log(log((double)train_n)) / (double)train_n;
}
}
bool get_warm_start() { return this->warm_start; }
double get_train_loss() { return this->train_loss; }
int get_group_df() { return this->group_df; }
double get_effective_number() { return this->effective_number; }
int get_sparsity_level() { return this->sparsity_level; }
T2 get_beta() { return this->beta; }
T3 get_coef0() { return this->coef0; }
Eigen::VectorXi get_A_out() { return this->A_out; };
Eigen::VectorXi get_I_out() { return this->I_out; };
Eigen::VectorXd get_bd() { return this->bd; }
virtual int get_beta_size(int n, int p) { return p; }
/**
* @param train_x sample matrix for training
* @param train_y response matrix for training
* @param train_weight weight of each sample
* @param g_index the first position of each group
* @param g_size size of each group
* @param train_n sample size for training, i.e. the number of rows in `train_x`
* @param p number of variables, i.e. the number of columns in `train_x`
* @param N number of different groups
*/
void fit(T4 &train_x, T1 &train_y, Eigen::VectorXd &train_weight, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size,
int train_n, int p, int N) {
int T0 = this->sparsity_level;
this->x = &train_x;
this->y = &train_y;
// Warm-start:
// If warm-start is disabled, they would be just zero.
this->beta = this->beta_init;
this->coef0 = this->coef0_init;
this->bd = this->bd_init;
// Initialize sub-search:
// To speed up, we focus on a subset of all groups, named `U`,
// whose size is equal or smaller than N.
// (More details can be found in function `get_A` below)
if (this->sub_search == 0 || this->sparsity_level + this->sub_search > N)
this->U_size = N;
else
this->U_size = this->sparsity_level + this->sub_search;
// Specific init:
// Some specific initial setting for each algorithm,
// e.g. pre-computed items.
this->inital_setting(train_x, train_y, train_weight, g_index, g_size, N);
// No need to splicing?
// If N == T0, we must put all groups into the model.
if (N == T0) {
this->A_out = Eigen::VectorXi::LinSpaced(N, 0, N - 1);
// T2 beta_old = this->beta;
// T3 coef0_old = this->coef0;
bool success = this->primary_model_fit(train_x, train_y, train_weight, this->beta, this->coef0, DBL_MAX,
this->A_out, g_index, g_size);
// if (!success){
// this->beta = beta_old;
// this->coef0 = coef0_old;
// }
this->train_loss = this->loss_function(train_x, train_y, train_weight, this->beta, this->coef0, this->A_out,
g_index, g_size, this->lambda_level);
this->effective_number = this->effective_number_of_parameter(train_x, train_x, train_y, train_weight,
this->beta, this->beta, this->coef0);
return;
}
// Initial active/inactive set:
// Defaultly, choose `T0` groups with largest `bd_init` as initial active set.
// If there is no `bd_init` (may be no warm-start), compute it on `beta_init`, `coef0_init`, `A_init`.
// However, you can also define your own criterion by rewrite the function.
Eigen::VectorXi A = this->inital_screening(train_x, train_y, this->beta, this->coef0, this->A_init,
this->I_init, this->bd, train_weight, g_index, g_size, N);
Eigen::VectorXi I = complement(A, N);
// `A_ind` stores all indexes of active set.
// For example, if "Group 1" is active and there are three variables inside,
// `A` will only contain "Group 1" while `A_ind` store all three variables' indexes.
Eigen::VectorXi A_ind = find_ind(A, g_index, g_size, (this->beta).rows(), N);
T4 X_A = X_seg(train_x, train_n, A_ind, this->model_type);
T2 beta_A;
slice(this->beta, A_ind, beta_A);
// if (this->algorithm_type == 6)
// {
// T3 coef0_old = this->coef0;
// Fitting on initial active set
bool success =
this->primary_model_fit(X_A, train_y, train_weight, beta_A, this->coef0, DBL_MAX, A, g_index, g_size);
// if (!success){
// this->coef0 = coef0_old;
// }else{
slice_restore(beta_A, A_ind, this->beta);
this->train_loss = this->loss_function(X_A, train_y, train_weight, beta_A, this->coef0, A, g_index, g_size,
this->lambda_level);
// }
// for (int i=0;i<A.size();i++) cout<<A(i)<<" ";cout<<endl<<"init loss = "<<this->train_loss<<endl;
// }
this->beta_warmstart = this->beta;
this->coef0_warmstart = this->coef0;
// Start splicing:
// `C_max` is the maximum exchange number in splicing.
// `get_A()` is to find and return the final chosen active set on spasity `T0`.
int always_select_size = this->always_select.size();
int C_max = min(min(T0 - always_select_size, this->U_size - T0 - always_select_size), this->exchange_num);
this->update_tau(train_n, N);
this->get_A(train_x, train_y, A, I, C_max, this->beta, this->coef0, this->bd, T0, train_weight, g_index, g_size,
N, this->tau, this->train_loss);
// Final fitting on `A`:
// For higher accuracy, fit again on chosen active set
// with stricter settings.
this->final_fitting(train_x, train_y, train_weight, A, g_index, g_size, train_n, N);
// Result & Output
this->A_out = A;
this->effective_number =
this->effective_number_of_parameter(train_x, X_A, train_y, train_weight, this->beta, beta_A, this->coef0);
this->group_df = A_ind.size();
return;
};
void get_A(T4 &X, T1 &y, Eigen::VectorXi &A, Eigen::VectorXi &I, int &C_max, T2 &beta, T3 &coef0,
Eigen::VectorXd &bd, int T0, Eigen::VectorXd &weights, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size,
int N, double tau, double &train_loss) {
// Universal set:
// We only consider splicing on a set `U`,
// which may not contain all groups, but we hope all "useful" groups are included.
// We need to extract these groups out, e.g. `X`->`X_U`, `A`->`A_U`,
// and they have a new index from 0 to `U_size`-1.
Eigen::VectorXi U(this->U_size);
Eigen::VectorXi U_ind;
Eigen::VectorXi g_index_U(this->U_size);
Eigen::VectorXi g_size_U(this->U_size);
T4 *X_U = new T4;
T2 beta_U;
Eigen::VectorXi A_U(T0);
Eigen::VectorXi I_U(this->U_size - T0);
Eigen::VectorXi always_select_U(this->always_select.size());
if (this->U_size == N) {
// If `U_size` == `N`, focus on all groups.
U = Eigen::VectorXi::LinSpaced(N, 0, N - 1);
} else {
// If `U_size` < `N`, focus on `U_size` groups with larger sacrifices.
U = max_k(bd, this->U_size, true);
}
// int p = X.cols();
int n = X.rows();
int C = C_max;
// The outer iteration:
// 1. extract data from U
// 2. splicing & fitting on U (inner iteration), update active set
// 3. update U
// 4. if U changed, exit
int iter = 0;
while (iter++ < this->max_iter) {
// mapping
if (this->U_size == N) {
// If consider all groups, it is no need to map or give a new index.
delete X_U;
X_U = &X;
U_ind = Eigen::VectorXi::LinSpaced((this->beta).rows(), 0, (this->beta).rows() - 1);
beta_U = beta;
g_size_U = g_size;
g_index_U = g_index;
A_U = A;
I_U = I;
always_select_U = this->always_select;
} else {
// Extract `X`, `beta`, `g_index`, `g_size`, `always_select` on U,
// give them new index (from 0 to U_size-1),
// and name as `X_U`, `beta_U`, `g_index_U`, `g_size_U`, `always_select_U` respectively.
U_ind = find_ind(U, g_index, g_size, (this->beta).rows(), N);
*X_U = X_seg(X, n, U_ind, this->model_type);
slice(beta, U_ind, beta_U);
int pos = 0;
for (int i = 0; i < U.size(); i++) {
g_size_U(i) = g_size(U(i));
g_index_U(i) = pos;
pos += g_size_U(i);
}
// Since we have ranked U from large to small with sacrifice,
// the first `T0` group should be initial active sets.
A_U = Eigen::VectorXi::LinSpaced(T0, 0, T0 - 1);
I_U = Eigen::VectorXi::LinSpaced(this->U_size - T0, T0, this->U_size - 1);
int *temp = new int[N], s = this->always_select.size();
memset(temp, 0, N);
for (int i = 0; i < s; i++) temp[this->always_select(i)] = 1;
for (int i = 0; i < this->U_size; i++) {
if (s <= 0) break;
if (temp[U(i)] == 1) {
always_select_U(this->always_select.size() - s) = i;
s--;
}
}
delete[] temp;
}
// The inner iteration:
// 1. splicing on U
// 2. update A_U
int num = -1;
while (true) {
num++;
Eigen::VectorXi A_ind = find_ind(A_U, g_index_U, g_size_U, U_ind.size(), this->U_size);
T4 X_A = X_seg(*X_U, n, A_ind, this->model_type);
T2 beta_A;
slice(beta_U, A_ind, beta_A);
Eigen::VectorXd bd_U = Eigen::VectorXd::Zero(this->U_size);
this->sacrifice(*X_U, X_A, y, beta_U, beta_A, coef0, A_U, I_U, weights, g_index_U, g_size_U,
this->U_size, A_ind, bd_U, U, U_ind, num);
for (int i = 0; i < always_select_U.size(); i++) {
bd_U(always_select_U(i)) = DBL_MAX;
}
// Splicing:
// Try to exchange items in active and inactive set,
// If new loss is smaller, accept it and return TRUE.
double l0 = train_loss;
bool exchange = this->splicing(*X_U, y, A_U, I_U, C_max, beta_U, coef0, bd_U, weights, g_index_U,
g_size_U, this->U_size, tau, l0);
if (exchange)
train_loss = l0;
else
// A_U is unchanged, so break.
break;
}
// If A_U not change, U will not change and we can stop.
if (A_U.size() == 0 || A_U.maxCoeff() == T0 - 1) break;
// Update & Restore beta, A from U
slice_restore(beta_U, U_ind, beta);
Eigen::VectorXi ind = Eigen::VectorXi::Zero(N);
for (int i = 0; i < T0; i++) ind(U(A_U(i))) = 1;
int tempA = 0, tempI = 0;
for (int i = 0; i < N; i++)
if (ind(i) == 0)
I(tempI++) = i;
else
A(tempA++) = i;
// Compute sacrifices in full set
Eigen::VectorXi A_ind0 = find_ind(A, g_index, g_size, (this->beta).rows(), N);
T4 X_A0 = X_seg(X, n, A_ind0, this->model_type);
T2 beta_A0;
slice(beta, A_ind0, beta_A0);
Eigen::VectorXi U0 = Eigen::VectorXi::LinSpaced(N, 0, N - 1); // U0 contains all groups
Eigen::VectorXi U_ind0 = Eigen::VectorXi::LinSpaced((this->beta).rows(), 0, (this->beta).rows() - 1);
this->sacrifice(X, X_A0, y, beta, beta_A0, coef0, A, I, weights, g_index, g_size, N, A_ind0, bd, U0, U_ind0,
0);
if (this->U_size == N) {
// If U is the full set, there is no need to update, so stop.
for (int i = 0; i < this->always_select.size(); i++) bd(this->always_select(i)) = DBL_MAX;
break;
} else {
// If U is changed in the new situation, update it and iter again.
for (int i = 0; i < T0; i++) bd(A(i)) = DBL_MAX;
Eigen::VectorXi U_new = max_k(bd, this->U_size, true);
U = U_new;
C_max = C;
}
}
if (this->U_size != N) delete X_U;
return;
};
    // One splicing pass: tries to exchange up to `C_max` groups between the active
    // set `A` and inactive set `I`. Candidates are the `C_max` active groups with
    // the smallest backward sacrifices and the `C_max` inactive groups with the
    // largest forward sacrifices (both read from `bd`). If an exchange lowers the
    // loss by more than `tau`, it is accepted and the function returns true,
    // updating `A`, `I`, `beta`, `coef0`, `C_max`, and `train_loss` in place;
    // otherwise the exchange size `k` shrinks (linearly or by halving, per
    // `splicing_type`) until no improvement is found and false is returned.
    bool splicing(T4 &X, T1 &y, Eigen::VectorXi &A, Eigen::VectorXi &I, int &C_max, T2 &beta, T3 &coef0,
                  Eigen::VectorXd &bd, Eigen::VectorXd &weights, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size,
                  int N, double tau, double &train_loss) {
        if (C_max <= 0) return false;
        // init
        // int p = X.cols();
        int n = X.rows();
        int A_size = A.size();
        int I_size = I.size();
        // Gather per-group sacrifices for the current active / inactive groups.
        Eigen::VectorXd beta_A_group(A_size);
        Eigen::VectorXd d_I_group(I_size);
        for (int i = 0; i < A_size; i++) {
            beta_A_group(i) = bd(A(i));
        }
        for (int i = 0; i < I_size; i++) {
            d_I_group(i) = bd(I(i));
        }
        // s1: active groups most likely to leave; s2: inactive groups most likely to enter.
        Eigen::VectorXi A_min_k = min_k(beta_A_group, C_max, true);
        Eigen::VectorXi I_max_k = max_k(d_I_group, C_max, true);
        Eigen::VectorXi s1 = vector_slice(A, A_min_k);
        Eigen::VectorXi s2 = vector_slice(I, I_max_k);
        // for (int i=0;i<C_max;i++){
        //     cout<<"try: ("<<s1(i)<<","<<bd(s1(i))<<") -> ("<<s2(i)<<","<<bd(s2(i))<<")"<<endl;///
        // }
        Eigen::VectorXi A_exchange(A_size);
        Eigen::VectorXi A_ind_exchage;
        T4 X_A_exchage;
        T2 beta_A_exchange;
        T3 coef0_A_exchange;
        double L;
        // Try exchanges of decreasing size k = C_max, ..., 1.
        for (int k = C_max; k >= 1;) {
            A_exchange = diff_union(A, s1, s2);
            A_ind_exchage = find_ind(A_exchange, g_index, g_size, (this->beta).rows(), N);
            X_A_exchage = X_seg(X, n, A_ind_exchage, this->model_type);
            slice(beta, A_ind_exchage, beta_A_exchange);
            coef0_A_exchange = coef0;
            // Refit on the candidate active set, then evaluate its loss.
            bool success = this->primary_model_fit(X_A_exchage, y, weights, beta_A_exchange, coef0_A_exchange,
                                                   train_loss, A_exchange, g_index, g_size);
            // if (success){
            L = this->loss_function(X_A_exchage, y, weights, beta_A_exchange, coef0_A_exchange, A_exchange, g_index,
                                    g_size, this->lambda_level);
            // }else{
            //     L = train_loss + 1;
            // }
            if (train_loss - L > tau) {
                // Accept: improvement exceeds the splicing tolerance `tau`.
                train_loss = L;
                A = A_exchange;
                I = complement(A_exchange, N);
                slice_restore(beta_A_exchange, A_ind_exchage, beta);
                coef0 = coef0_A_exchange;
                C_max = k;
                return true;
            } else {
                // Reject: shrink the exchange size and retry with fewer swaps.
                if (this->splicing_type == 1)
                    k = k - 1;
                else
                    k = k / 2;
                s1 = s1.head(k).eval();
                s2 = s2.head(k).eval();
            }
        }
        return false;
    };
    // Hook for model-specific one-time setup before fitting (e.g. precomputed
    // Gram matrices). Default: no-op; overridden by child algorithms.
    virtual void inital_setting(T4 &X, T1 &y, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                                Eigen::VectorXi &g_size, int &N){};
virtual void clear_setting(){};
virtual Eigen::VectorXi inital_screening(T4 &X, T1 &y, T2 &beta, T3 &coef0, Eigen::VectorXi &A, Eigen::VectorXi &I,
Eigen::VectorXd &bd, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
Eigen::VectorXi &g_size, int &N) {
if (bd.size() == 0) {
// variable initialization
int n = X.rows();
int beta_size = this->get_beta_size(X.rows(), X.cols());
bd = Eigen::VectorXd::Zero(N);
// calculate beta & d & h
Eigen::VectorXi A_ind = find_ind(A, g_index, g_size, beta_size, N);
T4 X_A = X_seg(X, n, A_ind, this->model_type);
T2 beta_A;
slice(beta, A_ind, beta_A);
Eigen::VectorXi U = Eigen::VectorXi::LinSpaced(N, 0, N - 1);
Eigen::VectorXi U_ind = Eigen::VectorXi::LinSpaced(beta_size, 0, beta_size - 1);
this->sacrifice(X, X_A, y, beta, beta_A, coef0, A, I, weights, g_index, g_size, N, A_ind, bd, U, U_ind, 0);
// A_init
for (int i = 0; i < A.size(); i++) {
bd(A(i)) = DBL_MAX / 2;
}
// alway_select
for (int i = 0; i < this->always_select.size(); i++) {
bd(this->always_select(i)) = DBL_MAX;
}
}
// get Active-set A according to max_k bd
Eigen::VectorXi A_new = max_k(bd, this->sparsity_level);
return A_new;
}
void final_fitting(T4 &train_x, T1 &train_y, Eigen::VectorXd &train_weight, Eigen::VectorXi &A,
Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, int train_n, int N) {
Eigen::VectorXi A_ind = find_ind(A, g_index, g_size, (this->beta).rows(), N);
T4 X_A = X_seg(train_x, train_n, A_ind, this->model_type);
T2 beta_A;
slice(this->beta, A_ind, beta_A);
this->primary_model_fit_max_iter += FINAL_FIT_ITER_ADD;
// coef0_old = this->coef0;
bool success =
this->primary_model_fit(X_A, train_y, train_weight, beta_A, this->coef0, DBL_MAX, A, g_index, g_size);
// if (!success){
// this->coef0 = coef0_old;
// }else{
slice_restore(beta_A, A_ind, this->beta);
this->train_loss = this->loss_function(X_A, train_y, train_weight, beta_A, this->coef0, A, g_index, g_size,
this->lambda_level);
// }
this->primary_model_fit_max_iter -= FINAL_FIT_ITER_ADD;
}
    // Model-specific training loss at (beta, coef0), including the ridge term
    // weighted by `lambda`. Default stub returns 0; children must override.
    virtual double loss_function(T4 &X, T1 &y, Eigen::VectorXd &weights, T2 &beta, T3 &coef0, Eigen::VectorXi &A,
                                 Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, double lambda) {
        return 0;
    };
    // Model-specific sacrifice computation: fills `bd` with the per-group
    // importance scores used by screening and splicing (backward sacrifice for
    // active groups, forward sacrifice for inactive groups). Default: no-op;
    // children must override.
    virtual void sacrifice(T4 &X, T4 &XA, T1 &y, T2 &beta, T2 &beta_A, T3 &coef0, Eigen::VectorXi &A,
                           Eigen::VectorXi &I, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                           Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind, Eigen::VectorXd &bd,
                           Eigen::VectorXi &U, Eigen::VectorXi &U_ind, int num) {
        return;
    };
    // Model-specific inner solver: fits (beta, coef0) on the active design X.
    // Returns whether the fit is considered successful. Default stub returns
    // true without fitting; children must override.
    virtual bool primary_model_fit(T4 &X, T1 &y, Eigen::VectorXd &weights, T2 &beta, T3 &coef0, double loss0,
                                   Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        return true;
    };
    // Effective degrees of freedom of the fitted model (used by information
    // criteria). Default: the target sparsity level; children may refine this
    // (e.g. ridge-adjusted eigenvalue formula).
    virtual double effective_number_of_parameter(T4 &X, T4 &XA, T1 &y, Eigen::VectorXd &weights, T2 &beta, T2 &beta_A,
                                                 T3 &coef0) {
        return this->sparsity_level;
    };
};
#endif // SRC_ALGORITHM_H
#ifndef SRC_ALGORITHMGLM_H
#define SRC_ALGORITHMGLM_H
#include "Algorithm.h"
using namespace std;
// Generic GLM base algorithm.
//
// Children only need to supply the GLM-specific pieces (gradient core, Hessian
// core, inverse link, log-probability, null model); this base then provides
// the shared machinery: two fitting strategies (approximate Newton and IRLS),
// the loss, the sacrifice computation for splicing, and the effective number
// of parameters. Template parameters follow the project convention:
// T1 = response type, T2 = coefficient type, T3 = intercept type, T4 = design
// matrix type (dense or sparse).
template <class T1, class T2, class T3, class T4>
class _abessGLM : public Algorithm<T1, T2, T3, T4> {
   public:
    _abessGLM(int algorithm_type, int model_type, int max_iter, int primary_model_fit_max_iter,
              double primary_model_fit_epsilon, bool warm_start, int exchange_num, Eigen::VectorXi always_select,
              int splicing_type, int sub_search)
        : Algorithm<T1, T2, T3, T4>::Algorithm(algorithm_type, model_type, max_iter, primary_model_fit_max_iter,
                                               primary_model_fit_epsilon, warm_start, exchange_num, always_select,
                                               splicing_type, sub_search){};
    ~_abessGLM(){};
    bool approximate_Newton;    // use approximate Newton method or not.
    bool fit_intercept = true;  // whether to consider intercept (coef0) or not.
    double newton_step = 1;     // initial step size for the approximate Newton update.
    // Eigen::MatrixXd G;  // Gradient
    // Eigen::MatrixXd H;  // Hessian
    double Hessian_range[2] = {1e-7, 1e7};        // the range of acceptable value in Hessian
    double Xbeta_range[2] = {-DBL_MAX, DBL_MAX};  // the range of acceptable value for X * beta
    /* --- TO BE IMPLEMENTED IN CHILD CLASS --- */
    virtual Eigen::MatrixXd gradient_core(T4 &X_full, T1 &y, Eigen::VectorXd &weights, T2 &beta_full) {
        // the gradient matrix can be expressed as G = X^T * A,
        // returns the gradient core A
        return Eigen::MatrixXd::Zero(beta_full.rows(), beta_full.cols());
    };
    virtual Eigen::VectorXd hessian_core(T4 &X_full, T1 &y, Eigen::VectorXd &weights, T2 &beta_full) {
        // the hessian matrix can be expressed as H = X^T * D * X,
        // returns the (diagnal values of) diagnal matrix D.
        return Eigen::VectorXd::Ones(X_full.rows());
    };
    virtual T1 inv_link_function(T4 &X_full, T2 &beta_full) {
        // denote the link function g() as g(y) = X^T * beta,
        // return its inverse g^{-1}(X, beta), i.e. predicted y
        return T1(X_full * beta_full);
    }
    virtual T1 log_probability(T4 &X_full, T2 &beta_full, T1 &y) {
        // returns log P(y | X, beta)
        return T1(Eigen::MatrixXd::Zero(y.rows(), y.cols()));
    }
    virtual bool null_model(T1 &y, Eigen::VectorXd &weights, T3 &coef0) {
        // returns a null model,
        // i.e. given only y, fit an intercept
        return true;
    }
    /* ---------------------------------------- */
    /* --- CAN BE RE-IMPLEMENTED IN CHILD CLASS --- */
    virtual Eigen::MatrixXd gradient(T4 &X_full, T1 &y, Eigen::VectorXd &weights, T2 &beta_full) {
        // returns gradient matrix (no penalty term)
        return X_full.transpose() * this->gradient_core(X_full, y, weights, beta_full);
    }
    virtual Eigen::MatrixXd hessian(T4 &X_full, T1 &y, Eigen::VectorXd &weights, T2 &beta_full) {
        // returns hessian matrix; only the diagonal X^T D X entries are formed,
        // each clamped into Hessian_range for numerical stability.
        Eigen::VectorXd H_core = this->hessian_core(X_full, y, weights, beta_full);
        Eigen::VectorXd H_diag(X_full.cols());
        for (int i = 0; i < X_full.cols(); i++) {
            H_diag(i) = X_full.col(i).eval().cwiseProduct(H_core).dot(X_full.col(i).eval());
            trunc(H_diag(i), Hessian_range);
        }
        return Eigen::MatrixXd(H_diag.asDiagonal());
    };
    virtual bool primary_model_fit(T4 &X, T1 &y, Eigen::VectorXd &weights, T2 &beta, T3 &coef0, double loss0,
                                   Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        // Dispatch to one of two fitting strategies; an empty design falls back
        // to the intercept-only null model.
        if (X.cols() == 0) return null_model(y, weights, coef0);
        if (this->approximate_Newton) {
            // Fitting method 1: Approximate Newton
            return this->_approx_newton_fit(X, y, weights, beta, coef0, loss0, A, g_index, g_size);
        } else {
            // Fitting method 2: Iteratively Reweighted Least Squares
            return this->_IRLS_fit(X, y, weights, beta, coef0, loss0, A, g_index, g_size);
        }
        return true;
    };
    virtual double loss_function(T4 &X, T1 &y, Eigen::VectorXd &weights, T2 &beta, T3 &coef0, Eigen::VectorXi &A,
                                 Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, double lambda) {
        // Negative weighted log-likelihood plus ridge penalty on beta.
        int n = X.rows();
        int p = X.cols();
        int M = y.cols();
        T4 X_full;
        T2 beta_full;
        add_constant_column(X_full, X, true);
        combine_beta_coef0(beta_full, beta, coef0, true);
        Eigen::VectorXd log_proba = this->log_probability(X_full, beta_full, y);
        return -log_proba.dot(weights) + lambda * beta.cwiseAbs2().sum();
    };
    virtual void sacrifice(T4 &X, T4 &XA, T1 &y, T2 &beta, T2 &beta_A, T3 &coef0, Eigen::VectorXi &A,
                           Eigen::VectorXi &I, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                           Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind, Eigen::VectorXd &bd,
                           Eigen::VectorXi &U, Eigen::VectorXi &U_ind, int num) {
        // Per-group sacrifice scores written into `bd`:
        //   active groups  -> squared norm of phi_G * beta_G (backward sacrifice)
        //   inactive groups-> squared norm of phi_G^{-1} * grad_G (forward sacrifice)
        // where phi_G is the matrix square root of the group's ridge-adjusted
        // X^T D X block.
        int p = X.cols();
        int n = X.rows();
        int M = y.cols();
        int A_size = A.size();
        int I_size = I.size();
        T4 X_A_full;
        T2 beta_A_full;
        add_constant_column(X_A_full, XA, true);
        combine_beta_coef0(beta_A_full, beta_A, coef0, true);
        Eigen::MatrixXd G = X.transpose() * this->gradient_core(X_A_full, y, weights, beta_A_full);
        Eigen::VectorXd H_core = this->hessian_core(X_A_full, y, weights, beta_A_full);
        G -= 2 * this->lambda_level * beta;
        Eigen::MatrixXd betabar = Eigen::MatrixXd::Zero(p, M);
        Eigen::MatrixXd dbar = Eigen::MatrixXd::Zero(p, M);
        for (int i = 0; i < N; i++) {
            // focus on X in group i
            T4 XG = X.middleCols(g_index(i), g_size(i));
            T4 XG_new = XG;
            for (int j = 0; j < g_size(i); j++) {
                XG_new.col(j) = XG.col(j).cwiseProduct(H_core);
            }
            Eigen::MatrixXd XGbar =
                XG_new.transpose() * XG + 2 * this->lambda_level * Eigen::MatrixXd::Identity(g_size(i), g_size(i));
            Eigen::MatrixXd phiG;
            XGbar.sqrt().evalTo(phiG);
            Eigen::MatrixXd invphiG = phiG.ldlt().solve(Eigen::MatrixXd::Identity(g_size(i), g_size(i)));
            // compute sacrifice for each variable
            betabar.block(g_index(i), 0, g_size(i), M) = phiG * beta.block(g_index(i), 0, g_size(i), M);
            dbar.block(g_index(i), 0, g_size(i), M) = invphiG * G.block(g_index(i), 0, g_size(i), M);
        }
        // backward sacrifice (group)
        for (int i = 0; i < A_size; i++) {
            bd(A(i)) = betabar.block(g_index(A[i]), 0, g_size(A[i]), M).squaredNorm() / g_size(A(i));
        }
        // forward sacrifice (group)
        for (int i = 0; i < I_size; i++) {
            bd(I(i)) = dbar.block(g_index(I[i]), 0, g_size(I[i]), M).squaredNorm() / g_size(I(i));
        }
    };
    virtual double effective_number_of_parameter(T4 &X, T4 &XA, T1 &y, Eigen::VectorXd &weights, T2 &beta, T2 &beta_A,
                                                 T3 &coef0) {
        // Ridge-adjusted degrees of freedom: sum of eig_i / (eig_i + lambda)
        // over the eigenvalues of the weighted Gram matrix on the active set.
        if (XA.cols() == 0) return 0;
        if (this->lambda_level == 0) return XA.cols();
        T4 X_A_full;
        T2 beta_A_full;
        add_constant_column(X_A_full, XA, true);
        combine_beta_coef0(beta_A_full, beta_A, coef0, true);
        Eigen::VectorXd H_core = this->hessian_core(X_A_full, y, weights, beta_A_full);
        T4 XA_new = XA;
        for (int j = 0; j < XA.cols(); j++) {
            XA_new.col(j) = XA.col(j).cwiseProduct(H_core);
        }
        Eigen::MatrixXd XGbar = XA_new.transpose() * XA;
        Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> adjoint_eigen_solver(XGbar);
        double enp = 0.;
        for (int i = 0; i < adjoint_eigen_solver.eigenvalues().size(); i++) {
            enp += adjoint_eigen_solver.eigenvalues()(i) / (adjoint_eigen_solver.eigenvalues()(i) + this->lambda_level);
        }
        return enp;
    };
    /* -------------------------------------------- */
    /* --- BUILT-IN FITTING METHOD --- */
    bool _approx_newton_fit(T4 &X, T1 &y, Eigen::VectorXd &weights, T2 &beta, T3 &coef0, double loss0,
                            Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        // Fitting method 1: Approximate Newton
        // Each iteration moves along G_i / H_ii (diagonal-Hessian Newton step)
        // with step-halving until the loss decreases.
        int n = X.rows();
        int p = X.cols();
        int M = y.cols();
        T4 X_full;
        T2 beta_full;
        add_constant_column(X_full, X, this->fit_intercept);
        combine_beta_coef0(beta_full, beta, coef0, this->fit_intercept);
        // initialize
        double step = this->newton_step;
        double loss = this->loss_function(X, y, weights, beta, coef0, A, g_index, g_size, this->lambda_level);
        for (int iter = 0; iter < this->primary_model_fit_max_iter; iter++) {
            // get Gradient/Hessian (no penalty)
            Eigen::MatrixXd G = this->gradient(X_full, y, weights, beta_full);
            Eigen::MatrixXd H = this->hessian(X_full, y, weights, beta_full);
            // update direction (add penalty)
            Eigen::MatrixXd direction = G;
            for (int i = 0; i < direction.rows(); i++) {
                double hii = H(i, i);
                // the intercept row (i == 0 when fit_intercept) is not penalized
                if (!this->fit_intercept || i > 0) {
                    direction.row(i) -= 2 * this->lambda_level * beta_full.row(i).eval();
                    hii += 2 * this->lambda_level;
                }
                direction.row(i) /= hii;
            }
            // update beta
            T2 beta_new = beta_full + step * T2(direction);
            extract_beta_coef0(beta_new, beta, coef0, this->fit_intercept);
            double loss_new = this->loss_function(X, y, weights, beta, coef0, A, g_index, g_size, this->lambda_level);
            // step down if loss_new > loss
            while (loss_new > loss && step > this->primary_model_fit_epsilon) {
                step /= 2;
                beta_new = beta_full + step * direction;
                extract_beta_coef0(beta_new, beta, coef0, this->fit_intercept);
                loss_new = this->loss_function(X, y, weights, beta, coef0, A, g_index, g_size, this->lambda_level);
            }
            // Update beta_full if loss decrease
            if (loss_new > loss) {
                break;
            } else {
                beta_full = beta_new;
            }
            // Early stop 1: expected final loss is too large
            double expected_loss = loss - (this->primary_model_fit_max_iter - iter) * (loss - loss_new);
            if (expected_loss >= loss0 + this->tau) break;
            // Early stop 2: step is too small
            if (step < this->primary_model_fit_epsilon) break;
        }
        // extract beta and coef0
        extract_beta_coef0(beta_full, beta, coef0, this->fit_intercept);
        return true;
    };
    bool _IRLS_fit(T4 &X, T1 &y, Eigen::VectorXd &weights, T2 &beta, T3 &coef0, double loss0, Eigen::VectorXi &A,
                   Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        // Fitting method 2: Iteratively Reweighted Least Squares
        // Each iteration solves a weighted ridge least-squares problem on the
        // working response Z = X*beta + (y - y_pred)/D with weights D.
        int n = X.rows();
        int p = X.cols();
        int M = y.cols();
        // X_full: add constant col to X
        T4 X_full;
        T2 beta_full;
        add_constant_column(X_full, X, this->fit_intercept);
        combine_beta_coef0(beta_full, beta, coef0, this->fit_intercept);
        // initialize
        T4 X_new(X_full);
        double loss = this->loss_function(X, y, weights, beta, coef0, A, g_index, g_size, this->lambda_level);
        for (int iter = 0; iter < this->primary_model_fit_max_iter; iter++) {
            Eigen::VectorXd D = this->hessian_core(X_full, y, weights, beta_full);
            // reweight: build working response Z and scaled design X_new = D * X
            T1 y_pred = this->inv_link_function(X_full, beta_full);
            T1 Z = y - y_pred;
            array_quotient(Z, D, 1);
            Z += X_full * beta_full;
            for (int i = 0; i < X_full.cols(); i++) {
                X_new.col(i) = X_full.col(i).cwiseProduct(D);
            }
            // update beta (intercept column is excluded from the ridge penalty)
            Eigen::MatrixXd lambda_one = Eigen::MatrixXd::Identity(X_full.cols(), X_full.cols());
            if (this->fit_intercept) lambda_one(0, 0) = 0;
            Eigen::MatrixXd XTX = 2 * this->lambda_level * lambda_one + X_new.transpose() * X_full;
            beta_full = XTX.ldlt().solve(X_new.transpose() * Z);
            // compute new loss
            extract_beta_coef0(beta_full, beta, coef0, this->fit_intercept);
            double loss_new = this->loss_function(X, y, weights, beta, coef0, A, g_index, g_size, this->lambda_level);
            // Early stop 1: expected final loss is too large
            double expected_loss = loss - (this->primary_model_fit_max_iter - iter) * (loss - loss_new);
            if (expected_loss >= loss0 + this->tau) break;
            // Early stop 2: step is too small
            double step = (loss - loss_new) / (0.1 + loss_new);
            if (step < this->primary_model_fit_epsilon) break;
            // Early stop 3: acceptable precision
            if (loss_new < min(1e-3, this->tau)) break;
            // update loss
            loss = loss_new;
        }
        // extract beta and coef0
        extract_beta_coef0(beta_full, beta, coef0, this->fit_intercept);
        return true;
    };
    /* ------------------------------- */
};
// Logistic regression (binomial GLM, logit link) specialization.
// Supplies the GLM cores: Pi = sigmoid(X*beta), gradient core (y - Pi) .* w,
// Hessian core Pi(1-Pi) .* w, and Bernoulli log-likelihood.
template <class T4>
class abessLogistic : public _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4> {
   public:
    abessLogistic(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
                  double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
                  Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0, int sub_search = 0)
        : _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4>::_abessGLM(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessLogistic(){};
    double Xbeta_range[2] = {-30, 30};  // clamp X*beta to avoid exp() overflow
    double PiPj_range[2] = {0.001, 1};  // Pi * Pj; lower bound keeps the Hessian away from 0
    Eigen::MatrixXd gradient_core(T4 &X_full, Eigen::VectorXd &y, Eigen::VectorXd &weights,
                                  Eigen::VectorXd &beta_full) {
        // gradient core: weighted residual (y - Pi)
        Eigen::VectorXd Pi = this->inv_link_function(X_full, beta_full);
        Eigen::VectorXd G = (y - Pi).cwiseProduct(weights);
        return Eigen::MatrixXd(G);
    };
    Eigen::VectorXd hessian_core(T4 &X_full, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta_full) {
        // Hessian core: weighted variance Pi * (1 - Pi), truncated into PiPj_range
        Eigen::VectorXd Pi = this->inv_link_function(X_full, beta_full);
        Eigen::VectorXd one = Eigen::VectorXd::Ones(X_full.rows());
        Eigen::VectorXd W = Pi.cwiseProduct(one - Pi).cwiseProduct(weights);
        trunc(W, PiPj_range);
        return W;
    };
    Eigen::VectorXd inv_link_function(T4 &X_full, Eigen::VectorXd &beta_full) {
        // sigmoid(X * beta) with the linear predictor clamped into Xbeta_range
        Eigen::VectorXd eta = X_full * beta_full;
        trunc(eta, Xbeta_range);
        Eigen::VectorXd one = Eigen::VectorXd::Ones(eta.size());
        Eigen::VectorXd expeta = eta.array().exp();
        Eigen::VectorXd Pi = expeta.array() / (one + expeta).array();
        return Pi;
    };
    Eigen::VectorXd log_probability(T4 &X_full, Eigen::VectorXd &beta_full, Eigen::VectorXd &y) {
        // Bernoulli log-likelihood: y*log(Pi) + (1-y)*log(1-Pi), element-wise
        Eigen::VectorXd ones = Eigen::VectorXd::Ones(y.size());
        Eigen::VectorXd P_1 = this->inv_link_function(X_full, beta_full);
        Eigen::VectorXd logP_1 = P_1.array().log();
        Eigen::VectorXd logP_0 = (ones - P_1).array().log();
        return y.cwiseProduct(logP_1) + (ones - y).cwiseProduct(logP_0);
    }
    bool null_model(Eigen::VectorXd &y, Eigen::VectorXd &weights, double &coef0) {
        // intercept-only fit: coef0 = logit(mean(y))
        coef0 = -log(1 / y.mean() - 1);
        return true;
    }
};
// Linear regression (least squares) specialization.
// Does not use the generic Newton/IRLS path: it overrides primary_model_fit
// with a closed-form ridge solve, and sacrifice with precomputed group-wise
// Phi / inv(Phi) matrices. Optionally caches X^T * X columns ("covariance
// update") to avoid recomputing inner products across splicing iterations.
template <class T4>
class abessLm : public _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4> {
   public:
    bool clear = true;                               // true -> precomputations must be (re)built
    Eigen::VectorXd XTy;                             /*X.transpose() * y */
    Eigen::VectorXd XTone;                           /* X.transpose() * Eigen::MatrixXd::one() */
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> PhiG;     /* PhiG for linear model. */
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> invPhiG;  /* invPhiG for linear model. */
    Eigen::VectorXd XTy_U;                           // XTy restricted to the current universal set U
    Eigen::VectorXd XTone_U;                         // XTone restricted to U
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> PhiG_U;   // PhiG restricted to U
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> invPhiG_U;  // invPhiG restricted to U
    Eigen::Matrix<T4, -1, -1> group_XTX;             /* XTX. */
    bool covariance_update;                          /* use covairance update mathod or not. */
    Eigen::VectorXd **covariance = NULL;             /* covairance matrix. */
    Eigen::VectorXi covariance_update_flag;          /* each variable have updated in covairance matirx. */
    abessLm(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
            double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
            Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0, int sub_search = 0)
        : _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4>::_abessGLM(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessLm(){};
    void inital_setting(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                        Eigen::VectorXi &g_size, int &N) {
        // One-time precomputation of group Gram blocks; Phi/invPhi are rebuilt
        // whenever lambda changes (they embed the ridge term).
        int n = X.rows(), p = X.cols();
        if (this->clear) {
            this->group_XTX = compute_group_XTX<T4>(X, g_index, g_size, n, p, N);
            if (this->covariance_update) {
                // cout<<"create pointer"<<endl;
                // lazily-filled cache: covariance[j] == X^T * X.col(j) once flag(j) == 1
                this->covariance = new Eigen::VectorXd *[p];
                this->covariance_update_flag = Eigen::VectorXi::Zero(p);
                this->XTy = X.transpose() * y;
                this->XTone = X.transpose() * Eigen::MatrixXd::Ones(n, 1);
            }
        }
        if (this->clear || this->lambda_change) {
            this->PhiG = Phi(X, g_index, g_size, n, p, N, this->lambda_level, this->group_XTX);
            this->invPhiG = invPhi(this->PhiG, N);
            this->PhiG_U.resize(N, 1);
            this->invPhiG_U.resize(N, 1);
        }
        this->clear = false;
    };
    void clear_setting() {
        // Release the lazily-built covariance cache and mark precomputations stale.
        this->clear = true;
        // delete pointer
        if (this->covariance_update) {
            // cout<<"delete pointer"<<endl;
            for (int i = 0; i < (this->covariance_update_flag).size(); i++)
                if (this->covariance_update_flag(i) == 1) delete this->covariance[i];
            delete[] this->covariance;
        }
    };
    bool primary_model_fit(T4 &x, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta, double &coef0,
                           double loss0, Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        // Closed-form ridge least squares: beta = (X^T X + lambda I)^{-1} X^T y.
        // NOTE(review): `weights` is not applied here — presumably weighting is
        // handled upstream; confirm against callers.
        // int n = x.rows();
        // int p = x.cols();
        if (x.cols() == 0) return true;
        // to ensure
        T4 X_full;
        add_constant_column(X_full, x, this->fit_intercept);
        // beta = (X.adjoint() * X + this->lambda_level * Eigen::MatrixXd::Identity(X.cols(),
        // X.cols())).colPivHouseholderQr().solve(X.adjoint() * y);
        Eigen::MatrixXd XTX =
            X_full.adjoint() * X_full + this->lambda_level * Eigen::MatrixXd::Identity(X_full.cols(), X_full.cols());
        // if (check_ill_condition(XTX)) return false;
        Eigen::VectorXd XTy = X_full.adjoint() * y;
        Eigen::VectorXd beta_full = XTX.ldlt().solve(XTy);
        extract_beta_coef0(beta_full, beta, coef0, this->fit_intercept);
        return true;
    };
    double loss_function(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta, double &coef0,
                         Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, double lambda) {
        // Mean squared error / 2 plus ridge penalty.
        int n = X.rows();
        Eigen::VectorXd one = Eigen::VectorXd::Ones(n);
        return (y - X * beta - coef0 * one).array().square().sum() / n / 2. + lambda * beta.cwiseAbs2().sum();
    }
    void mapping_U(Eigen::VectorXi &U, Eigen::VectorXi &U_ind) {
        // Restrict the cached XTy/XTone/Phi/invPhi to the universal set U
        // (U: group indices; U_ind: variable indices).
        int N = U.size(), p = U_ind.size();
        if (this->covariance_update)
            for (int i = 0; i < p; i++) {
                this->XTy_U(i) = this->XTy(U_ind(i), 0);
                this->XTone_U(i) = this->XTone(U_ind(i), 0);
            }
        for (int i = 0; i < N; i++) {
            this->PhiG_U(i, 0) = this->PhiG(U(i), 0);
            this->invPhiG_U(i, 0) = this->invPhiG(U(i), 0);
        }
        return;
    }
    Eigen::MatrixXd covariance_update_f_U(Eigen::VectorXi &U_ind, Eigen::VectorXi &A_ind_U) {
        // Return the columns of X^T X for the active variables (indexed within U),
        // computing and caching any column not yet in `covariance`.
        int k = A_ind_U.size(), p = U_ind.size();
        Eigen::MatrixXd cov_A(p, k);
        for (int i = 0; i < k; i++) {
            int Ai = U_ind(A_ind_U(i));
            if (this->covariance_update_flag(Ai) == 0) {
                this->covariance[Ai] = new Eigen::VectorXd;
                *this->covariance[Ai] = (*this->x).transpose() * (*this->x).col(Ai);
                this->covariance_update_flag(Ai) = 1;
            }
            // full-set case: copy the whole cached column; otherwise gather the U rows
            if (p == this->XTy.rows()) {
                cov_A.col(i) = *this->covariance[Ai];
            } else {
                for (int j = 0; j < p; j++) cov_A(j, i) = (*this->covariance[Ai])(U_ind(j));
            }
        }
        return cov_A;
    }
    void sacrifice(T4 &X, T4 &XA, Eigen::VectorXd &y, Eigen::VectorXd &beta, Eigen::VectorXd &beta_A, double &coef0,
                   Eigen::VectorXi &A, Eigen::VectorXi &I, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                   Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind, Eigen::VectorXd &bd, Eigen::VectorXi &U,
                   Eigen::VectorXi &U_ind, int num) {
        // Linear-model sacrifice using precomputed Phi/invPhi blocks; the
        // residual gradient d is computed either directly or via the cached
        // covariance columns.
        int p = X.cols();
        int n = X.rows();
        // num == 0 marks the first call for the current U: refresh the U-restricted caches.
        if (num == 0) {
            if (p == this->XTy.rows()) {
                this->XTy_U = this->XTy;
                this->XTone_U = this->XTone;
                this->PhiG_U = this->PhiG;
                this->invPhiG_U = this->invPhiG;
            } else {
                this->XTy_U.resize(p, 1);
                this->XTone_U.resize(p, 1);
                this->mapping_U(U, U_ind);
            }
        }
        Eigen::VectorXd d;
        if (!this->covariance_update) {
            // direct residual: d = X^T (y - XA*beta_A - coef0) / n - 2*lambda*beta
            Eigen::VectorXd one = Eigen::VectorXd::Ones(n);
            if (beta.size() != 0) {
                d = X.adjoint() * (y - XA * beta_A - coef0 * one) / double(n) - 2 * this->lambda_level * beta;
            } else {
                d = X.adjoint() * (y - coef0 * one) / double(n);
            }
        } else {
            // covariance-update path: reuse cached X^T X columns instead of X^T * residual
            Eigen::VectorXd one = Eigen::VectorXd::Ones(n);
            if (beta.size() != 0) {
                Eigen::VectorXd XTXbeta = this->covariance_update_f_U(U_ind, A_ind) * beta_A;
                d = (this->XTy_U - XTXbeta - this->XTone_U * coef0) / double(n) - 2 * this->lambda_level * beta;
            } else {
                Eigen::VectorXd XTonecoef0 = this->XTone_U * coef0;
                d = (this->XTy_U - XTonecoef0) / double(n);
            }
        }
        int A_size = A.size();
        int I_size = I.size();
        Eigen::VectorXd betabar = Eigen::VectorXd::Zero(p);
        Eigen::VectorXd dbar = Eigen::VectorXd::Zero(p);
        Eigen::MatrixXd phiG, invphiG;
        for (int i = 0; i < N; i++) {
            phiG = this->PhiG_U(i, 0);
            invphiG = this->invPhiG_U(i, 0);
            betabar.segment(g_index(i), g_size(i)) = phiG * beta.segment(g_index(i), g_size(i));
            dbar.segment(g_index(i), g_size(i)) = invphiG * d.segment(g_index(i), g_size(i));
        }
        // backward sacrifice for active groups, forward sacrifice for inactive groups
        for (int i = 0; i < A_size; i++) {
            bd(A[i]) = betabar.segment(g_index(A[i]), g_size(A[i])).squaredNorm() / g_size(A[i]);
        }
        for (int i = 0; i < I_size; i++) {
            bd(I[i]) = dbar.segment(g_index(I[i]), g_size(I[i])).squaredNorm() / g_size(I[i]);
        }
    }
    double effective_number_of_parameter(T4 &X, T4 &XA, Eigen::VectorXd &y, Eigen::VectorXd &weights,
                                         Eigen::VectorXd &beta, Eigen::VectorXd &beta_A, double &coef0) {
        // Ridge degrees of freedom: sum eig_i / (eig_i + lambda) of XA^T XA.
        if (this->lambda_level == 0.) {
            return XA.cols();
        } else {
            if (XA.cols() == 0) return 0.;
            Eigen::MatrixXd XGbar;
            XGbar = XA.transpose() * XA;
            Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> adjoint_eigen_solver(XGbar);
            double enp = 0.;
            for (int i = 0; i < adjoint_eigen_solver.eigenvalues().size(); i++) {
                enp += adjoint_eigen_solver.eigenvalues()(i) /
                       (adjoint_eigen_solver.eigenvalues()(i) + this->lambda_level);
            }
            return enp;
        }
    }
};
// Poisson regression (log link) specialization.
// Supplies the GLM cores: Ey = exp(X*beta), gradient core (y - Ey) .* w,
// Hessian core Ey .* w, and Poisson log-likelihood (up to the log(y!) constant).
template <class T4>
class abessPoisson : public _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4> {
   public:
    abessPoisson(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
                 double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
                 Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0, int sub_search = 0)
        : _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4>::_abessGLM(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessPoisson(){};
    double Xbeta_range[2] = {-30, 30};  // clamp X*beta to avoid exp() overflow
    Eigen::MatrixXd gradient_core(T4 &X_full, Eigen::VectorXd &y, Eigen::VectorXd &weights,
                                  Eigen::VectorXd &beta_full) {
        // gradient core: weighted residual (y - exp(X*beta))
        Eigen::VectorXd expeta = this->inv_link_function(X_full, beta_full);
        Eigen::VectorXd G = (y - expeta).cwiseProduct(weights);
        return Eigen::MatrixXd(G);
    };
    Eigen::VectorXd hessian_core(T4 &X_full, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta_full) {
        // Hessian core: weighted mean exp(X*beta)
        Eigen::VectorXd expeta = this->inv_link_function(X_full, beta_full);
        return expeta.cwiseProduct(weights);
    };
    Eigen::VectorXd inv_link_function(T4 &X_full, Eigen::VectorXd &beta_full) {
        // exp(X * beta) with the linear predictor clamped into Xbeta_range
        Eigen::VectorXd eta = X_full * beta_full;
        trunc(eta, Xbeta_range);
        Eigen::VectorXd expeta = eta.array().exp();
        return expeta;
    };
    Eigen::VectorXd log_probability(T4 &X_full, Eigen::VectorXd &beta_full, Eigen::VectorXd &y) {
        // Poisson log-likelihood (omitting the log(y!) term, constant in beta)
        Eigen::VectorXd Xbeta = X_full * beta_full;
        Eigen::VectorXd Ey = this->inv_link_function(X_full, beta_full);
        return y.cwiseProduct(Xbeta) - Ey;
    }
    bool null_model(Eigen::VectorXd &y, Eigen::VectorXd &weights, double &coef0) {
        // intercept-only fit: coef0 = log(weighted mean of y)
        coef0 = log(weights.dot(y) / weights.sum());
        return true;
    }
};
// Cox proportional-hazards model solver.
// NOTE(review): the cumulative sums below (cum_eta, cum_eta2, cum_eta3) run
// from the last sample backwards, which computes risk-set sums only if the
// samples are already sorted by observed time — that ordering is established
// outside this class; confirm against callers.
template <class T4>
class abessCox : public _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4> {
   public:
    Eigen::MatrixXd cox_hessian; /* hessian matrix for cox model. */
    Eigen::VectorXd cox_g;       /* score function for cox model. */

    abessCox(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
             double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
             Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0, int sub_search = 0)
        : _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4>::_abessGLM(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessCox(){};

    // Clipping range for the linear predictor X * beta before exponentiation.
    double Xbeta_range[2] = {-30, 30};

    // Reset the cached score/Hessian at the start of a fit so that sacrifice()
    // recomputes them until primary_model_fit() has stored fresh values.
    void inital_setting(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                        Eigen::VectorXi &g_size, int &N) {
        this->cox_g = Eigen::VectorXd::Zero(0);
        this->cox_hessian = Eigen::MatrixXd::Zero(0, 0);
    }

    // Fit the (ridge-penalized) Cox model on the active columns by Newton's
    // method with a backtracking (step-halving) line search on the partial
    // log-likelihood. On exit `beta` holds the fitted coefficients and the
    // last score/Hessian are cached in cox_g / cox_hessian for sacrifice().
    bool primary_model_fit(T4 &x, Eigen::VectorXd &y, Eigen::VectorXd &weight, Eigen::VectorXd &beta, double &coef0,
                           double loss0, Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        // No active variables: the Cox model has no intercept, nothing to fit.
        if (x.cols() == 0) {
            coef0 = 0.;
            return true;
        }

        int n = x.rows();
        int p = x.cols();
        Eigen::MatrixXd lambdamat = Eigen::MatrixXd::Identity(p, p);
        Eigen::VectorXd theta(n);
        Eigen::MatrixXd one = (Eigen::MatrixXd::Ones(n, n)).triangularView<Eigen::Upper>();
        Eigen::MatrixXd x_theta(n, p);
        Eigen::VectorXd xij_theta(n);
        Eigen::VectorXd cum_theta(n);
        Eigen::VectorXd g(p);
        Eigen::VectorXd beta0 = beta, beta1;
        Eigen::VectorXd cum_eta(n);
        Eigen::VectorXd cum_eta2(n);
        Eigen::VectorXd cum_eta3(n);
        Eigen::MatrixXd h(p, p);
        Eigen::VectorXd eta;
        Eigen::VectorXd d(p);
        // loss_function returns the negative partial log-likelihood, so negate
        // it back to a log-likelihood for the ascent test below.
        double loglik1,
            loglik0 = -this->loss_function(x, y, weight, beta0, coef0, A, g_index, g_size, this->lambda_level);
        // beta = Eigen::VectorXd::Zero(p);
        double step = 1.0;
        int l;
        for (l = 1; l <= this->primary_model_fit_max_iter; l++) {
            // eta <- w .* exp(clip(x * beta0)); the relative hazard per sample.
            eta = x * beta0;
            trunc(eta, Xbeta_range);
            eta = weight.array() * eta.array().exp();
            // cum_eta(k): sum of eta over samples k..n-1 (suffix / risk-set sum).
            cum_eta(n - 1) = eta(n - 1);
            for (int k = n - 2; k >= 0; k--) {
                cum_eta(k) = cum_eta(k + 1) + eta(k);
            }
            // cum_eta2(k): prefix sum of (y * w) / cum_eta.
            cum_eta2(0) = (y(0) * weight(0)) / cum_eta(0);
            for (int k = 1; k <= n - 1; k++) {
                cum_eta2(k) = (y(k) * weight(k)) / cum_eta(k) + cum_eta2(k - 1);
            }
            // cum_eta3(k): prefix sum of (y * w) / cum_eta^2.
            cum_eta3(0) = (y(0) * weight(0)) / pow(cum_eta(0), 2);
            for (int k = 1; k <= n - 1; k++) {
                cum_eta3(k) = (y(k) * weight(k)) / pow(cum_eta(k), 2) + cum_eta3(k - 1);
            }
            // Build the n-by-n Hessian kernel h (symmetric):
            //   off-diagonal: -cum_eta3(min(i,j)) * eta(i) * eta(j),
            //   diagonal:     cum_eta2(i) * eta(i) + off-diagonal term.
            h = -cum_eta3.replicate(1, n);
            h = h.cwiseProduct(eta.replicate(1, n));
            h = h.cwiseProduct(eta.replicate(1, n).transpose());
            for (int i = 0; i < n; i++) {
                for (int j = i + 1; j < n; j++) {
                    h(j, i) = h(i, j);
                }
            }
            h.diagonal() = cum_eta2.cwiseProduct(eta) + h.diagonal();
            // g = x.transpose() * (weight.cwiseProduct(y) - cum_eta2.cwiseProduct(eta));
            // g = g - 2 * this->lambda_level * beta0;
            // Per-sample score residual; projected onto x below.
            g = weight.cwiseProduct(y) - cum_eta2.cwiseProduct(eta);
            Eigen::MatrixXd temp = x.transpose() * h * x;
            if (this->approximate_Newton) {
                // Diagonal (quasi-Newton) step: ignore off-diagonal curvature.
                // d = g.cwiseQuotient((x.transpose() * h * x + 2 * this->lambda_level * lambdamat).diagonal());
                d = (x.transpose() * g - 2 * this->lambda_level * beta0).cwiseQuotient(temp.diagonal());
            } else {
                // Full Newton step via LDLT solve of the projected Hessian.
                // d = (x.transpose() * h * x + 2 * this->lambda_level * lambdamat).ldlt().solve(g);
                // if (check_ill_condition(temp)) return false;
                d = temp.ldlt().solve(x.transpose() * g - 2 * this->lambda_level * beta0);
            }

            // theta = x * beta0;
            // for (int i = 0; i < n; i++)
            // {
            //     if (theta(i) > 30)
            //         theta(i) = 30;
            //     else if (theta(i) < -30)
            //         theta(i) = -30;
            // }
            // theta = theta.array().exp();
            // cum_theta = one * theta;
            // x_theta = x.array().colwise() * theta.array();
            // x_theta = one * x_theta;
            // x_theta = x_theta.array().colwise() / cum_theta.array();
            // g = (x - x_theta).transpose() * (weights.cwiseProduct(y));

            // if (this->approximate_Newton)
            // {
            //     Eigen::VectorXd h(p);
            //     for (int k1 = 0; k1 < p; k1++)
            //     {
            //         xij_theta = (theta.cwiseProduct(x.col(k1))).cwiseProduct(x.col(k1));
            //         for (int j = n - 2; j >= 0; j--)
            //         {
            //             xij_theta(j) = xij_theta(j + 1) + xij_theta(j);
            //         }
            //         h(k1) = -(xij_theta.cwiseQuotient(cum_theta) -
            //         x_theta.col(k1).cwiseProduct(x_theta.col(k1))).dot(weights.cwiseProduct(y));
            //     }
            //     d = g.cwiseQuotient(h);
            // }
            // else
            // {
            //     Eigen::MatrixXd h(p, p);
            //     for (int k1 = 0; k1 < p; k1++)
            //     {
            //         for (int k2 = k1; k2 < p; k2++)
            //         {
            //             xij_theta = (theta.cwiseProduct(x.col(k1))).cwiseProduct(x.col(k2));
            //             for (int j = n - 2; j >= 0; j--)
            //             {
            //                 xij_theta(j) = xij_theta(j + 1) + xij_theta(j);
            //             }
            //             h(k1, k2) = -(xij_theta.cwiseQuotient(cum_theta) -
            //             x_theta.col(k1).cwiseProduct(x_theta.col(k2))).dot(weights.cwiseProduct(y)); h(k2, k1) = h(k1, k2);
            //         }
            //     }
            //     d = h.ldlt().solve(g);
            // }

            // Backtracking line search: halve the step until the partial
            // log-likelihood improves or the step becomes negligible.
            beta1 = beta0 + step * d;
            loglik1 = -this->loss_function(x, y, weight, beta1, coef0, A, g_index, g_size, this->lambda_level);
            while (loglik1 < loglik0 && step > this->primary_model_fit_epsilon) {
                step = step / 2;
                beta1 = beta0 + step * d;
                loglik1 = -this->loss_function(x, y, weight, beta1, coef0, A, g_index, g_size, this->lambda_level);
            }
            // Early-exit heuristic: even an optimistic extrapolation of the
            // remaining iterations cannot beat the reference loss loss0.
            bool condition1 =
                -(loglik1 + (this->primary_model_fit_max_iter - l - 1) * (loglik1 - loglik0)) + this->tau > loss0;
            if (condition1) {
                loss0 = -loglik0;
                beta = beta0;
                this->cox_hessian = h;
                this->cox_g = g;
                return true;
            }
            // Accept the step only if it improved the log-likelihood.
            if (loglik1 > loglik0) {
                beta0 = beta1;
                loglik0 = loglik1;
                this->cox_hessian = h;
                this->cox_g = g;
            }
            // Step size exhausted: converged (or stuck); return current iterate.
            if (step < this->primary_model_fit_epsilon) {
                loss0 = -loglik0;
                beta = beta0;
                this->cox_hessian = h;
                this->cox_g = g;
                return true;
            }
        }

        beta = beta0;
        return true;
    };

    // Negative Cox partial log-likelihood plus a ridge penalty lambda * ||beta||^2.
    double loss_function(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta, double &coef0,
                         Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, double lambda) {
        int n = X.rows();
        Eigen::VectorXd eta = X * beta;
        trunc(eta, Xbeta_range);
        Eigen::VectorXd expeta = eta.array().exp();
        // Suffix sums of exp(eta): the denominator over each risk set.
        Eigen::VectorXd cum_expeta(n);
        cum_expeta(n - 1) = expeta(n - 1);
        for (int i = n - 2; i >= 0; i--) {
            cum_expeta(i) = cum_expeta(i + 1) + expeta(i);
        }
        Eigen::VectorXd ratio = (expeta.cwiseQuotient(cum_expeta)).array().log();
        // Only event samples (y = 1) contribute to the partial likelihood.
        double loglik_cox = (ratio.cwiseProduct(y)).dot(weights);
        return -loglik_cox + lambda * beta.cwiseAbs2().sum();
    }

    // Compute splicing sacrifices bd: for active groups the (scaled) loss of
    // removing the group; for inactive groups the (scaled) gain of adding it.
    // Reuses the score/Hessian cached by primary_model_fit when available.
    void sacrifice(T4 &X, T4 &XA, Eigen::VectorXd &y, Eigen::VectorXd &beta, Eigen::VectorXd &beta_A, double &coef0,
                   Eigen::VectorXi &A, Eigen::VectorXi &I, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                   Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind, Eigen::VectorXd &bd, Eigen::VectorXi &U,
                   Eigen::VectorXi &U_ind, int num) {
        int p = X.cols();
        int n = X.rows();

        Eigen::VectorXd d;
        Eigen::MatrixXd h;
        Eigen::VectorXd g;
        if (this->cox_g.size() != 0) {
            // Use the quantities cached by the most recent primary_model_fit.
            h = this->cox_hessian;
            g = this->cox_g;
        } else {
            // Otherwise recompute the score and Hessian kernel from scratch
            // (same construction as in primary_model_fit).
            Eigen::VectorXd cum_eta(n);
            Eigen::VectorXd cum_eta2(n);
            Eigen::VectorXd cum_eta3(n);
            Eigen::VectorXd eta = XA * beta_A;
            for (int i = 0; i <= n - 1; i++) {
                if (eta(i) < -30.0) eta(i) = -30.0;
                if (eta(i) > 30.0) eta(i) = 30.0;
            }
            eta = weights.array() * eta.array().exp();
            cum_eta(n - 1) = eta(n - 1);
            for (int k = n - 2; k >= 0; k--) {
                cum_eta(k) = cum_eta(k + 1) + eta(k);
            }
            cum_eta2(0) = (y(0) * weights(0)) / cum_eta(0);
            for (int k = 1; k <= n - 1; k++) {
                cum_eta2(k) = (y(k) * weights(k)) / cum_eta(k) + cum_eta2(k - 1);
            }
            cum_eta3(0) = (y(0) * weights(0)) / pow(cum_eta(0), 2);
            for (int k = 1; k <= n - 1; k++) {
                cum_eta3(k) = (y(k) * weights(k)) / pow(cum_eta(k), 2) + cum_eta3(k - 1);
            }
            h = -cum_eta3.replicate(1, n);
            h = h.cwiseProduct(eta.replicate(1, n));
            h = h.cwiseProduct(eta.replicate(1, n).transpose());
            for (int i = 0; i < n; i++) {
                for (int j = i + 1; j < n; j++) {
                    h(j, i) = h(i, j);
                }
            }
            h.diagonal() = cum_eta2.cwiseProduct(eta) + h.diagonal();
            g = weights.cwiseProduct(y) - cum_eta2.cwiseProduct(eta);
        }

        // Penalized gradient of the loss with respect to beta.
        d = X.transpose() * g - 2 * this->lambda_level * beta;

        // Eigen::VectorXd d = X.transpose() * res;
        // Eigen::VectorXd h = weights.array() * pr.array() * (one - pr).array();
        int A_size = A.size();
        int I_size = I.size();

        Eigen::VectorXd betabar = Eigen::VectorXd::Zero(p);
        Eigen::VectorXd dbar = Eigen::VectorXd::Zero(p);

        // Whiten each group by the matrix square root of its projected Hessian:
        // betabar = Phi * beta, dbar = Phi^{-1} * d.
        for (int i = 0; i < N; i++) {
            T4 XG = X.middleCols(g_index(i), g_size(i));
            Eigen::MatrixXd XGbar =
                XG.transpose() * h * XG + 2 * this->lambda_level * Eigen::MatrixXd::Identity(g_size(i), g_size(i));

            Eigen::MatrixXd phiG;
            XGbar.sqrt().evalTo(phiG);
            Eigen::MatrixXd invphiG = phiG.ldlt().solve(Eigen::MatrixXd::Identity(g_size(i), g_size(i)));
            betabar.segment(g_index(i), g_size(i)) = phiG * beta.segment(g_index(i), g_size(i));
            dbar.segment(g_index(i), g_size(i)) = invphiG * d.segment(g_index(i), g_size(i));
        }
        for (int i = 0; i < A_size; i++) {
            bd(A[i]) = betabar.segment(g_index(A[i]), g_size(A[i])).squaredNorm() / g_size(A[i]);
        }
        for (int i = 0; i < I_size; i++) {
            bd(I[i]) = dbar.segment(g_index(I[i]), g_size(I[i])).squaredNorm() / g_size(I[i]);
        }
    }

    // Ridge-regularized effective degrees of freedom: sum over eigenvalues
    // e_i of XA' H XA of e_i / (e_i + lambda). With lambda == 0 this is just
    // the number of active columns.
    double effective_number_of_parameter(T4 &X, T4 &XA, Eigen::VectorXd &y, Eigen::VectorXd &weights,
                                         Eigen::VectorXd &beta, Eigen::VectorXd &beta_A, double &coef0) {
        if (this->lambda_level == 0.) {
            return XA.cols();
        } else {
            if (XA.cols() == 0) return 0.;

            // int p = X.cols();
            // int n = X.rows();

            Eigen::VectorXd d;
            Eigen::MatrixXd h;
            Eigen::VectorXd g;
            if (this->cox_g.size() != 0) {
                h = this->cox_hessian;
                // g = this->cox_g;
            }
            // NOTE(review): if cox_g is empty here, h stays 0x0 and the product
            // below is degenerate — this relies on primary_model_fit having run.

            // Eigen::VectorXd d = X.transpose() * res - 2 * this->lambda_level * beta;
            // Eigen::VectorXd h = weights.array() * pr.array() * (one - pr).array();

            Eigen::MatrixXd XGbar = XA.transpose() * h * XA;

            Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> adjoint_eigen_solver(XGbar);

            double enp = 0.;
            for (int i = 0; i < adjoint_eigen_solver.eigenvalues().size(); i++) {
                enp += adjoint_eigen_solver.eigenvalues()(i) /
                       (adjoint_eigen_solver.eigenvalues()(i) + this->lambda_level);
            }

            return enp;
        }
    }
};
// Multivariate (multi-response) linear model solver with optional
// covariance-update acceleration for the gradient computation.
template <class T4>
class abessMLm : public _abessGLM<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, T4> {
   public:
    bool clear = true;                              /* whether cached quantities must be (re)built. */
    Eigen::MatrixXd XTy;                            /* X.transpose() * y */
    Eigen::MatrixXd XTone;                          /* X.transpose() * Eigen::MatrixXd::Ones() */
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> PhiG;    /* PhiG for linear model. */
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> invPhiG; /* invPhiG for linear model. */
    Eigen::MatrixXd XTy_U;                          /* XTy restricted to the screened set U. */
    Eigen::MatrixXd XTone_U;                        /* XTone restricted to the screened set U. */
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> PhiG_U;
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> invPhiG_U;
    Eigen::Matrix<T4, -1, -1> group_XTX;    /* per-group XTX blocks. */
    bool covariance_update;                 /* use covariance update method or not. */
    Eigen::VectorXd **covariance = NULL;    /* lazily-built columns of the covariance matrix. */
    Eigen::VectorXi covariance_update_flag; /* whether each column of the covariance matrix has been built. */

    abessMLm(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
             double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
             Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0, int sub_search = 0)
        : _abessGLM<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, T4>::_abessGLM(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessMLm(){};

    // Build (once per data set, and per lambda for PhiG/invPhiG) the cached
    // quantities used by sacrifice(): group XTX blocks, X'y, X'1, and the
    // group whitening matrices Phi / Phi^{-1}.
    void inital_setting(T4 &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                        Eigen::VectorXi &g_size, int &N) {
        int n = X.rows(), p = X.cols();
        if (this->clear) {
            this->group_XTX = compute_group_XTX<T4>(X, g_index, g_size, n, p, N);
            if (this->covariance_update) {
                // Covariance columns are allocated lazily in covariance_update_f_U.
                this->covariance = new Eigen::VectorXd *[p];
                this->covariance_update_flag = Eigen::VectorXi::Zero(p);
                this->XTy = X.transpose() * y;
                this->XTone = X.transpose() * Eigen::MatrixXd::Ones(n, y.cols());
            }
        }
        if (this->clear || this->lambda_change) {
            // PhiG depends on lambda, so rebuild whenever lambda changes.
            this->PhiG = Phi(X, g_index, g_size, n, p, N, this->lambda_level, this->group_XTX);
            this->invPhiG = invPhi(this->PhiG, N);
            this->PhiG_U.resize(N, 1);
            this->invPhiG_U.resize(N, 1);
        }
        this->clear = false;
    };

    // Release the lazily-built covariance columns and mark caches stale.
    void clear_setting() {
        this->clear = true;
        // delete pointer
        if (this->covariance_update) {
            for (int i = 0; i < (this->covariance_update_flag).size(); i++)
                if (this->covariance_update_flag(i) == 1) delete this->covariance[i];
            delete[] this->covariance;
        }
    };

    // Ridge-penalized least squares on the active columns:
    //   beta = (X'X + lambda I)^{-1} X'y  (via LDLT), with an optional
    // intercept column prepended when fit_intercept is set.
    bool primary_model_fit(T4 &x, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::MatrixXd &beta,
                           Eigen::VectorXd &coef0, double loss0, Eigen::VectorXi &A, Eigen::VectorXi &g_index,
                           Eigen::VectorXi &g_size) {
        // beta = (X.adjoint() * X + this->lambda_level * Eigen::MatrixXd::Identity(X.cols(),
        // X.cols())).colPivHouseholderQr().solve(X.adjoint() * y);
        int n = x.rows();
        int p = x.cols();
        int M = y.cols();
        if (p == 0) return true;

        // to ensure
        T4 X;
        add_constant_column(X, x, this->fit_intercept);
        // beta = (X.adjoint() * X + this->lambda_level * Eigen::MatrixXd::Identity(X.cols(),
        // X.cols())).colPivHouseholderQr().solve(X.adjoint() * y);

        Eigen::MatrixXd XTX = X.adjoint() * X + this->lambda_level * Eigen::MatrixXd::Identity(X.cols(), X.cols());
        // if (check_ill_condition(XTX)) return false;
        Eigen::MatrixXd beta0 = XTX.ldlt().solve(X.adjoint() * y);

        // Split the solution back into slope matrix and intercept row.
        extract_beta_coef0(beta0, beta, coef0, this->fit_intercept);
        return true;
        // if (X.cols() == 0)
        // {
        //   // coef0 = y.colwise().sum();
        //   return;
        // }
        //
        // // overload_ldlt(X, X, y, beta);
        // Eigen::MatrixXd XTX = X.transpose() * X;
        // beta = (XTX + this->lambda_level * Eigen::MatrixXd::Identity(X.cols(), X.cols())).ldlt().solve(X.transpose()
        // * y);

        // CG
        // ConjugateGradient<T4, Lower | Upper> cg;
        // cg.compute(X.adjoint() * X + this->lambda_level * Eigen::MatrixXd::Identity(X.cols(), X.cols()));
        // beta = cg.solveWithGuess(X.adjoint() * y, beta);
    };

    // Half mean squared error plus the ridge penalty lambda * ||beta||^2.
    double loss_function(T4 &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::MatrixXd &beta,
                         Eigen::VectorXd &coef0, Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size,
                         double lambda) {
        int n = X.rows();
        Eigen::MatrixXd one = Eigen::MatrixXd::Ones(n, y.cols());
        return (y - X * beta - array_product(one, coef0)).array().square().sum() / n / 2.0 +
               lambda * beta.cwiseAbs2().sum();
    }

    // Restrict the cached XTy / XTone / PhiG / invPhiG to the screened
    // variable set U (U_ind holds the original column indices).
    void mapping_U(Eigen::VectorXi &U, Eigen::VectorXi &U_ind) {
        int N = U.size(), p = U_ind.size(), M = this->XTy.cols();
        if (this->covariance_update)
            for (int i = 0; i < p; i++)
                for (int j = 0; j < M; j++) {
                    this->XTy_U(i, j) = this->XTy(U_ind(i), j);
                    this->XTone_U(i, j) = this->XTone(U_ind(i), j);
                }

        for (int i = 0; i < N; i++) {
            this->PhiG_U(i, 0) = this->PhiG(U(i), 0);
            this->invPhiG_U(i, 0) = this->invPhiG(U(i), 0);
        }
        return;
    }

    // Return the covariance columns X'X(:, A) for the active set A (indices
    // relative to U), building and caching any column not yet computed.
    Eigen::MatrixXd covariance_update_f_U(Eigen::VectorXi &U_ind, Eigen::VectorXi &A_ind_U) {
        int k = A_ind_U.size(), p = U_ind.size();
        Eigen::MatrixXd cov_A(p, k);

        for (int i = 0; i < k; i++) {
            int Ai = U_ind(A_ind_U(i));
            if (this->covariance_update_flag(Ai) == 0) {
                // First use of this column: compute and cache X' * X.col(Ai).
                this->covariance[Ai] = new Eigen::VectorXd;
                *this->covariance[Ai] = (*this->x).transpose() * (*this->x).col(Ai);
                this->covariance_update_flag(Ai) = 1;
            }
            if (p == this->XTy.rows()) {
                cov_A.col(i) = *this->covariance[Ai];
            } else {
                for (int j = 0; j < p; j++) cov_A(j, i) = (*this->covariance[Ai])(U_ind(j));
            }
        }

        return cov_A;
    }

    // Compute splicing sacrifices bd via the whitened coefficients
    // (betabar = Phi * beta) and whitened gradient (dbar = Phi^{-1} * d).
    // The gradient d is either computed directly or assembled from the cached
    // covariance columns (covariance update).
    void sacrifice(T4 &X, T4 &XA, Eigen::MatrixXd &y, Eigen::MatrixXd &beta, Eigen::MatrixXd &beta_A,
                   Eigen::VectorXd &coef0, Eigen::VectorXi &A, Eigen::VectorXi &I, Eigen::VectorXd &weights,
                   Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind,
                   Eigen::VectorXd &bd, Eigen::VectorXi &U, Eigen::VectorXi &U_ind, int num) {
        int p = X.cols();
        int n = X.rows();
        int M = y.cols();

        if (num == 0) {
            // First call of this splicing round: align the caches with U.
            if (p == this->XTy.rows()) {
                this->XTy_U = this->XTy;
                this->XTone_U = this->XTone;
                this->PhiG_U = this->PhiG;
                this->invPhiG_U = this->invPhiG;
            } else {
                this->XTy_U.resize(p, M);
                this->XTone_U.resize(p, M);
                this->mapping_U(U, U_ind);
            }
        }

        Eigen::MatrixXd d;
        if (!this->covariance_update) {
            // Direct residual-based gradient.
            Eigen::MatrixXd one = Eigen::MatrixXd::Ones(n, y.cols());
            if (beta.size() != 0) {
                d = X.adjoint() * (y - XA * beta_A - array_product(one, coef0)) / double(n) -
                    2 * this->lambda_level * beta;
            } else {
                d = X.adjoint() * (y - array_product(one, coef0)) / double(n);
            }
        } else {
            // Covariance-update gradient: X'(y - X beta) from cached pieces.
            if (beta.size() != 0) {
                Eigen::MatrixXd XTXbeta = this->covariance_update_f_U(U_ind, A_ind) * beta_A;
                d = (this->XTy_U - XTXbeta - array_product(this->XTone_U, coef0)) / double(n) -
                    2 * this->lambda_level * beta;
            } else {
                Eigen::MatrixXd XTonecoef0 = array_product(this->XTone_U, coef0);
                d = (this->XTy_U - XTonecoef0) / double(n);
            }
        }

        int A_size = A.size();
        int I_size = I.size();

        Eigen::MatrixXd betabar = Eigen::MatrixXd::Zero(p, M);
        Eigen::MatrixXd dbar = Eigen::MatrixXd::Zero(p, M);

        Eigen::MatrixXd phiG, invphiG;

        for (int i = 0; i < N; i++) {
            phiG = this->PhiG_U(i, 0);
            invphiG = this->invPhiG_U(i, 0);
            betabar.block(g_index(i), 0, g_size(i), M) = phiG * beta.block(g_index(i), 0, g_size(i), M);
            dbar.block(g_index(i), 0, g_size(i), M) = invphiG * d.block(g_index(i), 0, g_size(i), M);
        }
        for (int i = 0; i < A_size; i++) {
            bd(A[i]) = betabar.block(g_index(A[i]), 0, g_size(A[i]), M).squaredNorm() / g_size(A[i]);
        }
        for (int i = 0; i < I_size; i++) {
            bd(I[i]) = dbar.block(g_index(I[i]), 0, g_size(I[i]), M).squaredNorm() / g_size(I[i]);
        }
    }

    // Ridge-regularized effective degrees of freedom: sum of e_i / (e_i + lambda)
    // over the eigenvalues e_i of XA' XA; equals XA.cols() when lambda == 0.
    double effective_number_of_parameter(T4 &X, T4 &XA, Eigen::MatrixXd &y, Eigen::VectorXd &weights,
                                         Eigen::MatrixXd &beta, Eigen::MatrixXd &beta_A, Eigen::VectorXd &coef0) {
        if (this->lambda_level == 0.) {
            return XA.cols();
        } else {
            if (XA.cols() == 0) return 0.;

            Eigen::MatrixXd XGbar;
            XGbar = XA.transpose() * XA;

            Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> adjoint_eigen_solver(XGbar);

            double enp = 0.;
            for (int i = 0; i < adjoint_eigen_solver.eigenvalues().size(); i++) {
                enp += adjoint_eigen_solver.eigenvalues()(i) /
                       (adjoint_eigen_solver.eigenvalues()(i) + this->lambda_level);
            }

            return enp;
        }
    }
};
// Multinomial logistic regression solver (M response classes, one-hot y).
template <class T4>
class abessMultinomial : public _abessGLM<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, T4> {
   public:
    abessMultinomial(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
                     double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
                     Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0,
                     int sub_search = 0)
        : _abessGLM<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, T4>::_abessGLM(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessMultinomial(){};

    // Clipping range for products of class probabilities Pi * Pj, which enter
    // the IRLS weight matrix; keeps the weights away from zero.
    double PiPj_range[2] = {0.001, 1};  // Pi * Pj

    // Fit multinomial logistic regression on the active columns, either by an
    // approximate (majorized) Newton scheme or by full IRLS on the stacked
    // M*n system. beta0 holds slopes and (optionally) an intercept row.
    bool primary_model_fit(T4 &x, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::MatrixXd &beta,
                           Eigen::VectorXd &coef0, double loss0, Eigen::VectorXi &A, Eigen::VectorXi &g_index,
                           Eigen::VectorXi &g_size) {
        // if (X.cols() == 0)
        // {
        //   coef0 = -log(y.colwise().sum().eval() - 1.0);
        //   return;
        // }

        int n = x.rows();
        int p = x.cols();
        int M = y.cols();
        if (p == 0) return true;

        T4 X;
        add_constant_column(X, x, this->fit_intercept);
        Eigen::MatrixXd lambdamat = Eigen::MatrixXd::Identity(X.cols(), X.cols());
        Eigen::MatrixXd beta0;
        combine_beta_coef0(beta0, beta, coef0, this->fit_intercept);

        Eigen::MatrixXd one_vec = Eigen::VectorXd::Ones(n);
        Eigen::MatrixXd Pi;
        pi(X, y, beta0, Pi);
        Eigen::MatrixXd log_Pi = Pi.array().log();
        array_product(log_Pi, weights, 1);
        // Penalized log-likelihood at the starting point.
        double loglik1 = DBL_MAX,
               loglik0 = (log_Pi.array() * y.array()).sum() - this->lambda_level * beta.cwiseAbs2().sum();

        int j;
        if (this->approximate_Newton) {
            // Majorization: replace the Hessian by t * X'X with
            // t = 2 * max_i,m Pi(i,m) * (1 - Pi(i,m)), so (X'X + lambda I) is
            // inverted only once.
            Eigen::MatrixXd one = Eigen::MatrixXd::Ones(n, M);
            double t = 2 * (Pi.array() * (one - Pi).array()).maxCoeff();
            Eigen::MatrixXd res = y - Pi;
            array_product(res, weights, 1);
            Eigen::MatrixXd XTres = X.transpose() * res / t;
            // ConjugateGradient<MatrixXd, Lower | Upper> cg;
            // cg.compute(X.adjoint() * X);
            Eigen::MatrixXd XTX =
                X.transpose() * X + this->lambda_level * Eigen::MatrixXd::Identity(X.cols(), X.cols());
            // if (check_ill_condition(XTX)) return false;
            Eigen::MatrixXd invXTX = XTX.ldlt().solve(Eigen::MatrixXd::Identity(X.cols(), X.cols()));

            Eigen::MatrixXd beta1;
            for (j = 0; j < this->primary_model_fit_max_iter; j++) {
                // beta1 = beta0 + cg.solve(res);
                beta1 = beta0 + invXTX * XTres;

                // double app_loss0, app_loss1, app_loss2;
                // app_loss0 = ((y - Pi) / t).squaredNorm();
                // app_loss1 = (-X * beta0 - (y - Pi) / t).squaredNorm();
                // app_loss2 = (X * (beta1 - beta0) - (y - Pi) / t).squaredNorm();

                pi(X, y, beta1, Pi);
                log_Pi = Pi.array().log();
                array_product(log_Pi, weights, 1);
                loglik1 = (log_Pi.array() * y.array()).sum() - this->lambda_level * beta.cwiseAbs2().sum();

                // Stop when: extrapolated gain cannot beat loss0 (condition1),
                // relative change is tiny (condition2), likelihood is ~0
                // (condition3), or the step decreased the likelihood (condition4).
                bool condition1 =
                    -(loglik1 + (this->primary_model_fit_max_iter - j - 1) * (loglik1 - loglik0)) + this->tau > loss0;
                // bool condition1 = false;
                bool condition2 = abs(loglik0 - loglik1) / (0.1 + abs(loglik1)) < this->primary_model_fit_epsilon;
                bool condition3 = abs(loglik1) < min(1e-3, this->tau);
                bool condition4 = loglik1 < loglik0;
                // bool condition4 = false;
                if (condition1 || condition2 || condition3 || condition4) {
                    break;
                }
                loglik0 = loglik1;

                // Identifiability normalization: subtract the last class's
                // coefficients from every class.
                for (int m1 = 0; m1 < M; m1++) {
                    beta0.col(m1) = beta1.col(m1) - beta1.col(M - 1);
                }

                // beta0 = beta1;
                t = 2 * (Pi.array() * (one - Pi).array()).maxCoeff();
                res = y - Pi;
                array_product(res, weights, 1);
                XTres = X.transpose() * res / t;
            }
        } else {
            // Full IRLS: build the M*n-by-M*n weight matrix W whose (m1,m2)
            // diagonal blocks hold Pi_m1 (1 - Pi_m1) (diag) and -Pi_m1 Pi_m2
            // (off-diag), both clipped by PiPj_range.
            Eigen::MatrixXd W(M * n, M * n);
            Eigen::VectorXd one = Eigen::VectorXd::Ones(n);
            for (int m1 = 0; m1 < M; m1++) {
                for (int m2 = m1; m2 < M; m2++) {
                    if (m1 == m2) {
                        W.block(m1 * n, m2 * n, n, n) = Eigen::MatrixXd::Zero(n, n);
                        Eigen::VectorXd PiPj = Pi.col(m1).array() * (one - Pi.col(m1).eval()).array() * weights.array();
                        trunc(PiPj, PiPj_range);
                        W.block(m1 * n, m2 * n, n, n).diagonal() = PiPj;
                    } else {
                        W.block(m1 * n, m2 * n, n, n) = Eigen::MatrixXd::Zero(n, n);
                        Eigen::VectorXd PiPj = Pi.col(m1).array() * Pi.col(m2).array() * weights.array();
                        trunc(PiPj, PiPj_range);
                        W.block(m1 * n, m2 * n, n, n).diagonal() = -PiPj;
                        W.block(m2 * n, m1 * n, n, n) = W.block(m1 * n, m2 * n, n, n);
                    }
                }
            }

            // Assemble X'W and X'WX blockwise (both symmetric in (m1, m2)).
            Eigen::MatrixXd XTWX(M * X.cols(), M * X.cols());
            Eigen::MatrixXd XTW(M * X.cols(), M * n);
            for (int m1 = 0; m1 < M; m1++) {
                for (int m2 = m1; m2 < M; m2++) {
                    XTW.block(m1 * X.cols(), m2 * n, X.cols(), n) = X.transpose() * W.block(m1 * n, m2 * n, n, n);
                    XTWX.block(m1 * X.cols(), m2 * X.cols(), X.cols(), X.cols()) =
                        XTW.block(m1 * X.cols(), m2 * n, X.cols(), n) * X + 2 * this->lambda_level * lambdamat;
                    XTW.block(m2 * X.cols(), m1 * n, X.cols(), n) = XTW.block(m1 * X.cols(), m2 * n, X.cols(), n);
                    XTWX.block(m2 * X.cols(), m1 * X.cols(), X.cols(), X.cols()) =
                        XTWX.block(m1 * X.cols(), m2 * X.cols(), X.cols(), X.cols());
                }
            }

            // Eigen::Matrix<Eigen::MatrixXd, -1, -1> res(M, 1);
            Eigen::VectorXd res(M * n);
            for (int m1 = 0; m1 < M; m1++) {
                res.segment(m1 * n, n) = y.col(m1).eval() - Pi.col(m1).eval();
            }

            Eigen::VectorXd Xbeta(M * n);
            for (int m1 = 0; m1 < M; m1++) {
                Xbeta.segment(m1 * n, n) = X * beta0.col(m1).eval();
            }

            // Working response Z of the IRLS step.
            Eigen::VectorXd Z = Xbeta + W.ldlt().solve(res);

            Eigen::MatrixXd beta1;
            Eigen::VectorXd beta0_tmp;
            for (j = 0; j < this->primary_model_fit_max_iter; j++) {
                beta0_tmp = XTWX.ldlt().solve(XTW * Z);
                for (int m1 = 0; m1 < M; m1++) {
                    beta0.col(m1) =
                        beta0_tmp.segment(m1 * X.cols(), X.cols()) - beta0_tmp.segment((M - 1) * X.cols(), X.cols());
                }
                // NOTE(review): this second loop overwrites the normalized
                // beta0 computed just above with the unnormalized solution,
                // making the normalization loop dead code — confirm which of
                // the two assignments is intended.
                for (int m1 = 0; m1 < M; m1++) {
                    beta0.col(m1) = beta0_tmp.segment(m1 * X.cols(), X.cols());
                }

                pi(X, y, beta0, Pi);
                log_Pi = Pi.array().log();
                array_product(log_Pi, weights, 1);
                loglik1 = (log_Pi.array() * y.array()).sum() - this->lambda_level * beta.cwiseAbs2().sum();

                // Same four stopping criteria as in the approximate branch.
                bool condition1 =
                    -(loglik1 + (this->primary_model_fit_max_iter - j - 1) * (loglik1 - loglik0)) + this->tau > loss0;
                // bool condition1 = false;
                bool condition2 = abs(loglik0 - loglik1) / (0.1 + abs(loglik1)) < this->primary_model_fit_epsilon;
                bool condition3 = abs(loglik1) < min(1e-3, this->tau);
                bool condition4 = loglik1 < loglik0;
                if (condition1 || condition2 || condition3 || condition4) {
                    break;
                }
                loglik0 = loglik1;

                // Rebuild W, X'W, X'WX, residuals and working response for the
                // next IRLS iteration (weights are not re-applied here).
                for (int m1 = 0; m1 < M; m1++) {
                    for (int m2 = m1; m2 < M; m2++) {
                        if (m1 == m2) {
                            // W(m1, m2) = Eigen::MatrixXd::Zero(n, n);
                            // W(m1, m2).diagonal() = Pi.col(m1).array() * (one - Pi.col(m1).eval()).array();

                            W.block(m1 * n, m2 * n, n, n) = Eigen::MatrixXd::Zero(n, n);
                            Eigen::VectorXd PiPj = Pi.col(m1).array() * (one - Pi.col(m1).eval()).array();
                            trunc(PiPj, PiPj_range);
                            W.block(m1 * n, m2 * n, n, n).diagonal() = PiPj;
                        } else {
                            W.block(m1 * n, m2 * n, n, n) = Eigen::MatrixXd::Zero(n, n);
                            Eigen::VectorXd PiPj = Pi.col(m1).array() * Pi.col(m2).array();
                            trunc(PiPj, PiPj_range);
                            W.block(m1 * n, m2 * n, n, n).diagonal() = -PiPj;
                            W.block(m2 * n, m1 * n, n, n) = W.block(m1 * n, m2 * n, n, n);
                        }
                    }
                }

                for (int m1 = 0; m1 < M; m1++) {
                    for (int m2 = m1; m2 < M; m2++) {
                        XTW.block(m1 * X.cols(), m2 * n, X.cols(), n) = X.transpose() * W.block(m1 * n, m2 * n, n, n);
                        XTWX.block(m1 * X.cols(), m2 * X.cols(), X.cols(), X.cols()) =
                            XTW.block(m1 * X.cols(), m2 * n, X.cols(), n) * X + 2 * this->lambda_level * lambdamat;
                        XTW.block(m2 * X.cols(), m1 * n, X.cols(), n) = XTW.block(m1 * X.cols(), m2 * n, X.cols(), n);
                        XTWX.block(m2 * X.cols(), m1 * X.cols(), X.cols(), X.cols()) =
                            XTWX.block(m1 * X.cols(), m2 * X.cols(), X.cols(), X.cols());
                    }
                }

                for (int m1 = 0; m1 < M; m1++) {
                    res.segment(m1 * n, n) = y.col(m1).eval() - Pi.col(m1).eval();
                }

                for (int m1 = 0; m1 < M; m1++) {
                    Xbeta.segment(m1 * n, n) = X * beta0.col(m1).eval();
                }

                Z = Xbeta + W.ldlt().solve(res);
            }
        }

        extract_beta_coef0(beta0, beta, coef0, this->fit_intercept);
        return true;
    };

    // Negative weighted multinomial log-likelihood plus lambda * ||beta||^2.
    double loss_function(T4 &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::MatrixXd &beta,
                         Eigen::VectorXd &coef0, Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size,
                         double lambda) {
        // weight
        Eigen::MatrixXd pr;
        pi(X, y, beta, coef0, pr);
        Eigen::MatrixXd log_pr = pr.array().log();
        // Eigen::VectorXd one_vec = Eigen::VectorXd::Ones(X.rows());
        array_product(log_pr, weights, 1);
        return -((log_pr.array() * y.array()).sum()) + lambda * beta.cwiseAbs2().sum();
    }

    // Per-group splicing sacrifices for the multinomial model, working on the
    // first M-1 classes (the last class is the reference).
    void sacrifice(T4 &X, T4 &XA, Eigen::MatrixXd &y, Eigen::MatrixXd &beta, Eigen::MatrixXd &beta_A,
                   Eigen::VectorXd &coef0, Eigen::VectorXi &A, Eigen::VectorXi &I, Eigen::VectorXd &weights,
                   Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind,
                   Eigen::VectorXd &bd, Eigen::VectorXi &U, Eigen::VectorXi &U_ind, int num) {
        int p = X.cols();
        int n = X.rows();
        int M = y.cols();

        Eigen::MatrixXd d;
        Eigen::MatrixXd h;
        Eigen::MatrixXd pr;
        pi(XA, y, beta_A, coef0, pr);
        // Weighted residual over the non-reference classes.
        Eigen::MatrixXd Pi = pr.leftCols(M - 1);
        Eigen::MatrixXd res = (y.leftCols(M - 1) - Pi);
        for (int i = 0; i < n; i++) {
            res.row(i) = res.row(i) * weights(i);
        }
        d = X.transpose() * res - 2 * this->lambda_level * beta;
        h = Pi;

        // int A_size = A.size();
        // int I_size = I.size();

        Eigen::MatrixXd betabar = Eigen::MatrixXd::Zero(p, M);
        Eigen::MatrixXd dbar = Eigen::MatrixXd::Zero(p, M);
        Eigen::MatrixXd phiG, invphiG;

        // For each group, form the (M-1)x(M-1) curvature XGbar and score a
        // single quadratic sacrifice for active and inactive groups alike.
        for (int i = 0; i < N; i++) {
            T4 XG = X.middleCols(g_index(i), g_size(i));
            T4 XG_new(h.rows(), h.cols());
            for (int m = 0; m < M - 1; m++) {
                XG_new.col(m) = h.col(m).cwiseProduct(XG).cwiseProduct(weights);
            }
            Eigen::MatrixXd XGbar = -XG_new.transpose() * XG_new;
            XGbar.diagonal() = Eigen::VectorXd(XG_new.transpose() * XG) + XGbar.diagonal();
            XGbar = XGbar + 2 * this->lambda_level * Eigen::MatrixXd::Identity(M - 1, M - 1);

            // Eigen::MatrixXd phiG;
            // XGbar.sqrt().evalTo(phiG);
            // Eigen::MatrixXd invphiG = phiG.ldlt().solve(Eigen::MatrixXd::Identity(M, M));
            // betabar.block(g_index(i), 0, g_size(i), M) = phiG * beta.block(g_index(i), 0, g_size(i), M);
            // dbar.block(g_index(i), 0, g_size(i), M) = invphiG * d.block(g_index(i), 0, g_size(i), M);
            Eigen::MatrixXd invXGbar = XGbar.ldlt().solve(Eigen::MatrixXd::Identity(M - 1, M - 1));

            Eigen::MatrixXd temp =
                d.block(g_index(i), 0, g_size(i), M - 1) * invXGbar + beta.block(g_index(i), 0, g_size(i), M - 1);
            bd(i) = (temp * XGbar * temp.transpose()).squaredNorm() / g_size(i);
        }
        // for (int i = 0; i < A_size; i++)
        // {
        //   bd(A[i]) = betabar.block(g_index(A[i]), 0, g_size(A[i]), M).squaredNorm() / g_size(A[i]);
        // }
        // for (int i = 0; i < I_size; i++)
        // {
        //   bd(I[i]) = dbar.block(g_index(I[i]), 0, g_size(I[i]), M).squaredNorm() / g_size(I[i]);
        // }
    }

    // Ridge-regularized effective degrees of freedom from the eigenvalues of
    // the full X'WX matrix (intercept column included).
    double effective_number_of_parameter(T4 &x, T4 &XA, Eigen::MatrixXd &y, Eigen::VectorXd &weights,
                                         Eigen::MatrixXd &beta, Eigen::MatrixXd &beta_A, Eigen::VectorXd &coef0) {
        if (this->lambda_level == 0.) {
            return XA.cols();
        } else {
            if (XA.cols() == 0) return 0.;

            int n = XA.rows();
            int p = XA.cols();
            int M = y.cols();

            // Prepend the intercept column and stack coef0 on top of beta.
            T4 X(n, p + 1);
            X.rightCols(p) = XA;
            add_constant_column(X);
            Eigen::MatrixXd lambdamat = Eigen::MatrixXd::Identity(p + 1, p + 1);

            Eigen::MatrixXd beta0 = Eigen::MatrixXd::Zero(p + 1, M);
            Eigen::MatrixXd one_vec = Eigen::VectorXd::Ones(n);
            beta0.row(0) = coef0;
            beta0.block(1, 0, p, M) = beta;
            Eigen::MatrixXd Pi;
            pi(X, y, beta0, Pi);

            // Same blockwise W and X'WX construction as in primary_model_fit.
            Eigen::MatrixXd W(M * n, M * n);
            Eigen::VectorXd one = Eigen::VectorXd::Ones(n);
            for (int m1 = 0; m1 < M; m1++) {
                for (int m2 = m1; m2 < M; m2++) {
                    if (m1 == m2) {
                        W.block(m1 * n, m2 * n, n, n) = Eigen::MatrixXd::Zero(n, n);
                        Eigen::VectorXd PiPj = Pi.col(m1).array() * (one - Pi.col(m1).eval()).array() * weights.array();
                        trunc(PiPj, PiPj_range);
                        W.block(m1 * n, m2 * n, n, n).diagonal() = PiPj;
                    } else {
                        W.block(m1 * n, m2 * n, n, n) = Eigen::MatrixXd::Zero(n, n);
                        Eigen::VectorXd PiPj = Pi.col(m1).array() * Pi.col(m2).array() * weights.array();
                        trunc(PiPj, PiPj_range);
                        W.block(m1 * n, m2 * n, n, n).diagonal() = -PiPj;
                        W.block(m2 * n, m1 * n, n, n) = W.block(m1 * n, m2 * n, n, n);
                    }
                }
            }

            Eigen::MatrixXd XTWX(M * (p + 1), M * (p + 1));
            Eigen::MatrixXd XTW(M * (p + 1), M * n);
            for (int m1 = 0; m1 < M; m1++) {
                for (int m2 = m1; m2 < M; m2++) {
                    XTW.block(m1 * (p + 1), m2 * n, (p + 1), n) = X.transpose() * W.block(m1 * n, m2 * n, n, n);
                    XTWX.block(m1 * (p + 1), m2 * (p + 1), (p + 1), (p + 1)) =
                        XTW.block(m1 * (p + 1), m2 * n, (p + 1), n) * X + 2 * this->lambda_level * lambdamat;
                    XTW.block(m2 * (p + 1), m1 * n, (p + 1), n) = XTW.block(m1 * (p + 1), m2 * n, (p + 1), n);
                    XTWX.block(m2 * (p + 1), m1 * (p + 1), (p + 1), (p + 1)) =
                        XTWX.block(m1 * (p + 1), m2 * (p + 1), (p + 1), (p + 1));
                }
            }

            // Eigen::VectorXd d = X.transpose() * res - 2 * this->lambda_level * beta;
            // Eigen::VectorXd h = weights.array() * pr.array() * (one - pr).array();

            // T4 XA_new = XA;
            // for (int j = 0; j < XA.cols(); j++)
            // {
            //   XA_new.col(j) = XA.col(j).cwiseProduct(h);
            // }
            // Eigen::MatrixXd XGbar;
            // XGbar = XA_new.transpose() * XA;

            Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> adjoint_eigen_solver(XTWX);

            double enp = 0.;
            for (int i = 0; i < adjoint_eigen_solver.eigenvalues().size(); i++) {
                enp += adjoint_eigen_solver.eigenvalues()(i) /
                       (adjoint_eigen_solver.eigenvalues()(i) + this->lambda_level);
            }

            return enp;
        }
    }
};
template <class T4>
class abessGamma : public _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4> {
   public:
    abessGamma(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
               double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
               Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0, int sub_search = 0)
        : _abessGLM<Eigen::VectorXd, Eigen::VectorXd, double, T4>::_abessGLM(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessGamma(){};

    // Admissible range for the linear predictor eta = X * beta: the canonical
    // (negative inverse) link requires eta to stay strictly negative.
    double Xbeta_range[2] = {-DBL_MAX, -1e-20};  // the range of acceptable value for X * beta

    // Score of the weighted Gamma log-likelihood: w .* (y - E[y]).
    Eigen::MatrixXd gradient_core(T4 &X_full, Eigen::VectorXd &y, Eigen::VectorXd &weights,
                                  Eigen::VectorXd &beta_full) {
        Eigen::VectorXd mean_response = this->inv_link_function(X_full, beta_full);
        Eigen::VectorXd score = weights.cwiseProduct(y - mean_response);
        return Eigen::MatrixXd(score);
    };
    // Hessian diagonal: w .* E[y]^2 (Gamma variance scales with the squared mean).
    Eigen::VectorXd hessian_core(T4 &X_full, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta_full) {
        Eigen::VectorXd mean_response = this->inv_link_function(X_full, beta_full);
        return weights.cwiseProduct(mean_response.array().square().matrix());
    };
    // Canonical link inverse: E[y] = -1 / eta, with eta clipped to stay negative.
    Eigen::VectorXd inv_link_function(T4 &X_full, Eigen::VectorXd &beta_full) {
        Eigen::VectorXd linear_pred = X_full * beta_full;
        trunc(linear_pred, Xbeta_range);
        return -linear_pred.cwiseInverse();
    }
    // Per-sample log-likelihood kernel: log(-eta) + eta * y.
    Eigen::VectorXd log_probability(T4 &X_full, Eigen::VectorXd &beta_full, Eigen::VectorXd &y) {
        Eigen::VectorXd linear_pred = X_full * beta_full;
        trunc(linear_pred, Xbeta_range);
        Eigen::VectorXd log_term = (-linear_pred).array().log().matrix();
        return log_term + linear_pred.cwiseProduct(y);
    }
    // Intercept-only solution: eta = -1 / weighted_mean(y).
    bool null_model(Eigen::VectorXd &y, Eigen::VectorXd &weights, double &coef0) {
        coef0 = -weights.sum() / weights.dot(y);
        return true;
    }
    // Fit on the active columns, first shifting the intercept so the whole
    // linear predictor starts inside the feasible (negative) range, then
    // delegating to the base-class GLM solvers.
    bool primary_model_fit(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta, double &coef0,
                           double loss0, Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        if (X.cols() == 0) return null_model(y, weights, coef0);
        Eigen::VectorXd eta = X * beta + Eigen::VectorXd::Constant(X.rows(), coef0);
        double eta_max = eta.maxCoeff();
        if (eta_max > this->Xbeta_range[1]) {
            // Push every component of eta below the upper bound.
            coef0 -= abs(eta_max) + 0.1;
        }
        if (this->approximate_Newton) {
            // Fitting method 1: Approximate Newton.
            return this->_approx_newton_fit(X, y, weights, beta, coef0, loss0, A, g_index, g_size);
        }
        // Fitting method 2: Iteratively Reweighted Least Squares.
        return this->_IRLS_fit(X, y, weights, beta, coef0, loss0, A, g_index, g_size);
    };
};
template <class T4>
class abessOrdinal : public _abessGLM<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, T4> {
   public:
    abessOrdinal(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
                 double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
                 Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 0, int sub_search = 0)
        : _abessGLM<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, T4>::_abessGLM(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessOrdinal(){};
    // Only beta.col(0) and coef0.head(k), where k = coef0.size() - 1, are used; the
    // remaining columns/entries of beta and coef0 are ignored by this model.
    // Gradient of the weighted ordinal log-likelihood minus the ridge penalty.
    // g.tail(p) always receives the beta part; g.head(k) is filled only when g was
    // sized p + k. Returns false if g has neither of the two accepted sizes.
    bool gradient(T4 &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::MatrixXd &beta, Eigen::VectorXd &coef0,
                  Eigen::VectorXd &g) {
        const int n = X.rows();
        const int p = X.cols();
        const int k = coef0.size() - 1;
        if (g.size() != p && g.size() != p + k) {
            return false;
        }
        Eigen::VectorXd coef(p + k);
        coef.head(k) = coef0.head(k);
        coef.tail(p) = beta.col(0);
        Eigen::MatrixXd logit(n, k);
        Eigen::MatrixXd P(n, k + 1);
        Eigen::MatrixXd grad_L(n, k);
        Eigen::VectorXd xbeta = X * coef.tail(p);
        // compute logit: logit(i, j) = sigmoid(x_i' beta + intercept_j), the cumulative probability
        for (int i1 = 0; i1 < n; i1++) {
            for (int i2 = 0; i2 < k; i2++) {
                logit(i1, i2) = 1.0 / (1 + exp(-xbeta(i1) - coef(i2)));
            }
        }
        // compute P: category probabilities as differences of adjacent cumulative logits,
        // floored at 1e-10 so the divisions below stay finite
        for (int i1 = 0; i1 < n; i1++) {
            for (int i2 = 0; i2 < k + 1; i2++) {
                if (i2 == 0) {
                    P(i1, 0) = logit(i1, 0);
                } else if (i2 == k) {
                    P(i1, k) = 1 - logit(i1, k - 1);
                } else {
                    P(i1, i2) = logit(i1, i2) - logit(i1, i2 - 1);
                }
                if (P(i1, i2) < 1e-10) P(i1, i2) = 1e-10;
            }
        }
        // compute gradient contributions per sample and threshold
        for (int i1 = 0; i1 < n; i1++) {
            for (int i2 = 0; i2 < k; i2++) {
                grad_L(i1, i2) = (y(i1, i2) / P(i1, i2) - y(i1, i2 + 1) / P(i1, i2 + 1)) * logit(i1, i2) *
                                 (1.0 - logit(i1, i2)) * weights(i1);
            }
        }
        if (g.size() == p + k) {
            g.head(k) = grad_L.colwise().sum();
        }
        // NOTE(review): the data term is a 1 x p row expression while the penalty term is
        // p x 1 — confirm the orientation satisfies Eigen's dimension rules for all p.
        g.tail(p) = grad_L.rowwise().sum().transpose() * X - 2 * this->lambda_level * coef.tail(p).eval();
        return true;
    }
    // Diagonal pieces of the Hessian: h_intercept receives the k intercept entries and W
    // the n per-sample weights used later to form X^T diag(W) X. Returns false on a size
    // mismatch of either output buffer.
    bool hessianCore(T4 &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::MatrixXd &beta, Eigen::VectorXd &coef0,
                     Eigen::VectorXd &h_intercept, Eigen::VectorXd &W) {
        const int n = X.rows();
        const int p = X.cols();
        const int k = coef0.size() - 1;
        if (h_intercept.size() != k || W.size() != n) {
            return false;
        }
        Eigen::VectorXd coef(p + k);
        coef.head(k) = coef0.head(k);
        coef.tail(p) = beta.col(0);
        Eigen::MatrixXd logit(n, k);
        Eigen::MatrixXd P(n, k + 1);
        Eigen::VectorXd xbeta = X * coef.tail(p);
        // compute logit (same construction as in gradient())
        for (int i1 = 0; i1 < n; i1++) {
            for (int i2 = 0; i2 < k; i2++) {
                logit(i1, i2) = 1.0 / (1 + exp(-xbeta(i1) - coef(i2)));
            }
        }
        // compute P, floored at 1e-10
        for (int i1 = 0; i1 < n; i1++) {
            for (int i2 = 0; i2 < k + 1; i2++) {
                if (i2 == 0) {
                    P(i1, 0) = logit(i1, 0);
                } else if (i2 == k) {
                    P(i1, k) = 1 - logit(i1, k - 1);
                } else {
                    P(i1, i2) = logit(i1, i2) - logit(i1, i2 - 1);
                }
                if (P(i1, i2) < 1e-10) P(i1, i2) = 1e-10;
            }
        }
        // h_intercept is filled with +=, so clear it first. Without this, callers that
        // reuse the buffer (primary_model_fit's Newton loop) accumulate stale values
        // across iterations. W below was already reset; this makes the two consistent.
        h_intercept.setZero();
        for (int i2 = 0; i2 < k; i2++) {
            for (int i1 = 0; i1 < n; i1++) {
                h_intercept(i2) += (1.0 / P(i1, i2) + 1.0 / P(i1, i2 + 1)) * logit(i1, i2) * logit(i1, i2) *
                                   (1.0 - logit(i1, i2)) * (1.0 - logit(i1, i2)) * weights(i1);
            }
        }
        W = Eigen::VectorXd::Zero(n);
        for (int i = 0; i < n; i++) {
            for (int i1 = 0; i1 < k; i1++) {
                W(i) += (1.0 / P(i, i1) + 1.0 / P(i, i1 + 1)) * logit(i, i1) * logit(i, i1) * (1.0 - logit(i, i1)) *
                        (1.0 - logit(i, i1));
            }
            for (int i1 = 0; i1 < k - 1; i1++) {
                W(i) -= 1.0 / P(i, i1 + 1) * logit(i, i1) * logit(i, i1 + 1) * (1.0 - logit(i, i1)) *
                        (1.0 - logit(i, i1 + 1));
            }
            W(i) *= weights(i);
        }
        return true;
    }
    // Damped approximate-Newton fit on the active set: diagonal Hessian scaling with
    // step halving that enforces (a) strictly increasing intercepts and (b) likelihood ascent.
    bool primary_model_fit(T4 &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::MatrixXd &beta,
                           Eigen::VectorXd &coef0, double loss0, Eigen::VectorXi &A, Eigen::VectorXi &g_indeX,
                           Eigen::VectorXi &g_size) {
        int i;
        int n = X.rows();
        int p = X.cols();
        int k = coef0.size() - 1;
        // make sure that coef0 is increasing before the first evaluation
        for (int i = 1; i < k; i++) {
            if (coef0(i) <= coef0(i - 1)) {
                coef0(i) = coef0(i - 1) + 1;
            }
        }
        double step = 1;
        double loglik_new, loglik;
        Eigen::VectorXd g(p + k);
        Eigen::VectorXd coef_new;
        Eigen::VectorXd h_intercept = Eigen::VectorXd::Zero(k);
        Eigen::VectorXd h_diag = Eigen::VectorXd::Zero(k + p);
        Eigen::VectorXd W(n);
        Eigen::VectorXd desend_direction;  // coef_new = coef + step * desend_direction
        Eigen::VectorXd coef = Eigen::VectorXd::Zero(p + k);
        coef.head(k) = coef0.head(k);
        coef.tail(p) = beta.col(0);
        loglik = -loss_function(X, y, weights, beta, coef0, A, g_indeX, g_size, this->lambda_level);
        for (int j = 0; j < this->primary_model_fit_max_iter; j++) {
            if (!gradient(X, y, weights, beta, coef0, g) || !hessianCore(X, y, weights, beta, coef0, h_intercept, W)) {
                return false;
            }
            // invert the diagonal Hessian entry-wise, clamping near-zero entries to +-1e7
            for (int i = 0; i < k; i++) {
                if (h_intercept(i) < 1e-7 && h_intercept(i) >= 0)
                    h_diag(i) = 1e7;
                else if (h_intercept(i) > -1e-7 && h_intercept(i) < 0)
                    h_diag(i) = -1e7;
                else
                    h_diag(i) = 1.0 / h_intercept(i);
            }
            for (int i = 0; i < p; i++) {
                h_diag(i + k) = X.col(i).cwiseProduct(W).dot(X.col(i)) + 2 * this->lambda_level;
                if (h_diag(i + k) < 1e-7 && h_diag(i + k) >= 0)
                    h_diag(i + k) = 1e7;
                else if (h_diag(i + k) > -1e-7 && h_diag(i + k) < 0)
                    h_diag(i + k) = -1e7;
                else
                    h_diag(i + k) = 1.0 / h_diag(i + k);
            }
            desend_direction = g.cwiseProduct(h_diag);
            coef_new = coef + step * desend_direction;  // approximate Newton step
            // halve the step until the intercept part of coef_new is strictly increasing
            // (coef itself is monotone, so this terminates as step shrinks)
            i = 1;
            while (i < k) {
                for (i = 1; i < k; i++) {
                    if (coef_new(i) <= coef_new(i - 1)) {
                        step = step / 2;
                        coef_new = coef + step * desend_direction;
                        break;
                    }
                }
            }
            beta.col(0) = coef_new.tail(p);
            coef0.head(k) = coef_new.head(k);
            loglik_new = -loss_function(X, y, weights, beta, coef0, A, g_indeX, g_size, this->lambda_level);
            // backtracking line search: keep halving while the likelihood does not improve
            while (loglik_new < loglik && step > this->primary_model_fit_epsilon) {
                step = step / 2;
                coef_new = coef + step * desend_direction;
                // re-enforce monotone intercepts at the reduced step
                i = 1;
                while (i < k) {
                    for (i = 1; i < k; i++) {
                        if (coef_new(i) <= coef_new(i - 1)) {
                            step = step / 2;
                            coef_new = coef + step * desend_direction;
                            break;
                        }
                    }
                }
                beta.col(0) = coef_new.tail(p);
                coef0.head(k) = coef_new.head(k);
                loglik_new = -loss_function(X, y, weights, beta, coef0, A, g_indeX, g_size, this->lambda_level);
            }
            // stop when the step has collapsed or when the projected final loss cannot
            // beat loss0 by more than the splicing tolerance tau
            bool condition1 = step < this->primary_model_fit_epsilon;
            bool condition2 =
                -(loglik_new + (this->primary_model_fit_max_iter - j - 1) * (loglik_new - loglik)) + this->tau > loss0;
            if (condition1 || condition2) {
                break;
            }
            beta.col(0) = coef_new.tail(p);
            coef0.head(k) = coef_new.head(k);
            coef = coef_new;
            loglik = loglik_new;
        }
        // write the last accepted iterate back; every column of beta gets the same fit
        for (int i = 0; i < beta.cols(); i++) {
            beta.col(i) = coef.tail(p).eval();
        }
        coef0.head(k) = coef.head(k);
        return true;
    }
    // Negative weighted log-likelihood plus the ridge penalty. Each sample contributes
    // the log-probability of its one-hot-encoded observed category (probability floored
    // at 1e-20 for the interior categories).
    double loss_function(T4 &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::MatrixXd &beta,
                         Eigen::VectorXd &coef0, Eigen::VectorXi &A, Eigen::VectorXi &g_indeX, Eigen::VectorXi &g_size,
                         double lambda) {
        int n = X.rows();
        // int p = X.cols();
        int k = coef0.size() - 1;
        Eigen::VectorXd xbeta = X * beta.col(0);
        double loss = lambda * beta.col(0).cwiseAbs2().sum();
        double pro = 0;
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < k + 1; j++) {
                if (y(i, j) == 1) {
                    if (j == 0) {
                        loss += log(1 + exp(-xbeta(i) - coef0(0))) * weights(i);
                    } else if (j == k) {
                        loss -= log(1 - 1.0 / (1 + exp(-xbeta(i) - coef0(k - 1)))) * weights(i);
                    } else {
                        pro = 1.0 / (1 + exp(-xbeta(i) - coef0(j))) - 1.0 / (1 + exp(-xbeta(i) - coef0(j - 1)));
                        if (pro < 1e-20) pro = 1e-20;
                        loss -= log(pro) * weights(i);
                    }
                    break;
                }
            }
        }
        return loss;
    }
    // Splicing scores: active groups are scored by the (scaled) coefficient norm,
    // inactive groups by the (scaled) gradient norm, each under the group-wise
    // square-root Hessian transform.
    void sacrifice(T4 &X, T4 &XA, Eigen::MatrixXd &y, Eigen::MatrixXd &beta, Eigen::MatrixXd &beta_A,
                   Eigen::VectorXd &coef0, Eigen::VectorXi &A, Eigen::VectorXi &I, Eigen::VectorXd &weights,
                   Eigen::VectorXi &g_indeX, Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind,
                   Eigen::VectorXd &bd, Eigen::VectorXi &U, Eigen::VectorXi &U_ind, int num) {
        int n = X.rows();
        int p = X.cols();
        int k = coef0.size() - 1;
        Eigen::VectorXd W = Eigen::VectorXd::Zero(n);
        Eigen::VectorXd d(p);
        Eigen::VectorXd h_intercept = Eigen::VectorXd::Zero(k);
        gradient(X, y, weights, beta, coef0, d);
        hessianCore(X, y, weights, beta, coef0, h_intercept, W);
        Eigen::VectorXd betabar = Eigen::VectorXd::Zero(p);
        Eigen::VectorXd dbar = Eigen::VectorXd::Zero(p);
        // we only need the N diagonal sub-matrices of the Hessian of the loss;
        // forming the full X^T diag(W) X would work but wastes time and memory
        for (int i = 0; i < N; i++) {
            T4 XG = X.middleCols(g_indeX(i), g_size(i));
            T4 XG_new = XG;
            for (int j = 0; j < g_size(i); j++) {
                XG_new.col(j) = XG.col(j).cwiseProduct(W);
            }
            // hessianG is the i-th group's diagonal sub-matrix of the Hessian of the loss
            Eigen::MatrixXd hessianG =
                XG_new.transpose() * XG + 2 * this->lambda_level * Eigen::MatrixXd::Identity(g_size(i), g_size(i));
            Eigen::MatrixXd phiG;
            hessianG.sqrt().evalTo(phiG);  // matrix square root (Eigen MatrixFunctions)
            Eigen::MatrixXd invphiG = phiG.ldlt().solve(
                Eigen::MatrixXd::Identity(g_size(i), g_size(i)));  // invert phiG via an LDLT solve
            betabar.segment(g_indeX(i), g_size(i)) = phiG * beta.col(0).segment(g_indeX(i), g_size(i));
            dbar.segment(g_indeX(i), g_size(i)) = invphiG * d.segment(g_indeX(i), g_size(i));
        }
        int A_size = A.size();
        int I_size = I.size();
        for (int i = 0; i < A_size; i++) {
            bd(A[i]) = betabar.segment(g_indeX(A[i]), g_size(A[i])).squaredNorm() / g_size(A[i]);
        }
        for (int i = 0; i < I_size; i++) {
            bd(I[i]) = dbar.segment(g_indeX(I[i]), g_size(I[i])).squaredNorm() / g_size(I[i]);
        }
    }
    // Effective degrees of freedom under ridge regularization: sum of
    // lambda_i / (lambda_i + lambda_level) over the eigenvalues of XA' diag(W) XA.
    double effective_number_of_parameter(T4 &X, T4 &XA, Eigen::MatrixXd &y, Eigen::VectorXd &weights,
                                         Eigen::MatrixXd &beta, Eigen::MatrixXd &beta_A, Eigen::VectorXd &coef0) {
        if (this->lambda_level == 0.) return XA.cols();
        if (XA.cols() == 0) return 0.;
        int n = X.rows();
        // int p = X.cols();
        int k = coef0.size() - 1;
        Eigen::VectorXd h_intercept = Eigen::VectorXd::Zero(k);
        Eigen::VectorXd W = Eigen::VectorXd::Zero(n);
        hessianCore(X, y, weights, beta, coef0, h_intercept, W);
        T4 XA_new = XA;
        for (int j = 0; j < XA.cols(); j++) {
            XA_new.col(j) = XA.col(j).cwiseProduct(W);
        }
        Eigen::MatrixXd XGbar = XA_new.transpose() * XA;
        Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> adjoint_eigen_solver(XGbar);
        double enp = 0.;
        for (int i = 0; i < adjoint_eigen_solver.eigenvalues().size(); i++) {
            enp += adjoint_eigen_solver.eigenvalues()(i) / (adjoint_eigen_solver.eigenvalues()(i) + this->lambda_level);
        }
        return enp;
    }
};
#endif // SRC_ALGORITHMGLM_H
| 84,696 | 43.320774 | 120 | h |
abess | abess-master/src/AlgorithmPCA.h | #ifndef SRC_ALGORITHMPCA_H
#define SRC_ALGORITHMPCA_H
#include <Spectra/SymEigsSolver.h>
#include "Algorithm.h"
using namespace Spectra;
template <class T4>
class abessPCA : public Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, T4> {
   public:
    // Sparse PCA via best-subset splicing: beta is the loading vector and the "loss"
    // being minimized is the negative explained variance -beta' Sigma beta.
    int pca_n = -1;      // optional override of the sample size used when computing tau
    bool is_cv = false;  // when true, Sigma is recomputed from each fold's data
    MatrixXd sigma;      // covariance estimate of the (training) data
    abessPCA(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
             double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
             Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 1, int sub_search = 0)
        : Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, T4>::Algorithm(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessPCA(){};
    // One-time setup before fitting: under CV the covariance must come from this fold's X.
    // (Name spelled "inital_setting" to match the interface used elsewhere in the package.)
    void inital_setting(T4 &X, VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size,
                        int &N) {
        if (this->is_cv) {
            this->sigma = compute_Sigma(X);
        }
    }
    // Splicing tolerance tau; pca_n (if set) replaces the fold's sample size.
    // (Name spelled "updata_tau" to match the interface used elsewhere in the package.)
    void updata_tau(int train_n, int N) {
        if (this->pca_n > 0) train_n = this->pca_n;
        if (train_n == 1) {
            this->tau = 0.0;
        } else {
            this->tau =
                0.01 * (double)this->sparsity_level * log((double)N) * log(log((double)train_n)) / (double)train_n;
        }
    }
    // Restricted problem: beta becomes the leading eigenvector of the active-set
    // submatrix of Sigma. Returns false if the eigensolver fails to converge.
    bool primary_model_fit(T4 &x, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta, double &coef0,
                           double loss0, Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        if (beta.size() == 0) return true;
        if (beta.size() == 1) {
            // one active variable: the loading is trivially 1
            beta(0) = 1;
            return true;
        }
        MatrixXd Y = SigmaA(this->sigma, A, g_index, g_size);
        DenseSymMatProd<double> op(Y);
        // request the single largest eigenpair (nev = 1, ncv = 2)
        SymEigsSolver<DenseSymMatProd<double>> eig(op, 1, 2);
        eig.init();
        eig.compute();
        MatrixXd temp;
        if (eig.info() == CompInfo::Successful) {
            temp = eig.eigenvectors(1);
        } else {
            return false;
        }
        beta = temp.col(0);
        return true;
    };
    // Negative explained variance -beta' Sigma_A beta; under CV, Sigma is re-estimated
    // from the evaluation data X passed in.
    double loss_function(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta, double &coef0,
                         Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, double lambda) {
        MatrixXd Y;
        if (this->is_cv) {
            MatrixXd sigma_test = compute_Sigma(X);
            Y = SigmaA(sigma_test, A, g_index, g_size);
        } else {
            Y = SigmaA(this->sigma, A, g_index, g_size);
        }
        return -beta.transpose() * Y * beta;
    };
    // Splicing scores: active groups are scored by the loading norm, inactive groups by
    // the norm of D = -Sigma beta + (beta' Sigma beta) beta, the projected gradient.
    void sacrifice(T4 &X, T4 &XA, Eigen::VectorXd &y, Eigen::VectorXd &beta, Eigen::VectorXd &beta_A, double &coef0,
                   Eigen::VectorXi &A, Eigen::VectorXi &I, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                   Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind, Eigen::VectorXd &bd, Eigen::VectorXi &U,
                   Eigen::VectorXi &U_ind, int num) {
        VectorXd D = -this->sigma * beta + beta.transpose() * this->sigma * beta * beta;
        for (int i = 0; i < A.size(); i++) {
            VectorXd temp = beta.segment(g_index(A(i)), g_size(A(i)));
            bd(A(i)) = temp.squaredNorm();
        }
        for (int i = 0; i < I.size(); i++) {
            VectorXd temp = D.segment(g_index(I(i)), g_size(I(i)));
            bd(I(i)) = temp.squaredNorm();
        }
    };
    // Extract the symmetric submatrix of Sigma restricted to the active groups in A.
    MatrixXd SigmaA(Eigen::MatrixXd &Sigma, Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        int len = 0;
        for (int i = 0; i < A.size(); i++) {
            len += g_size(A(i));
        }
        int k = 0;
        VectorXd ind(len);
        // flatten the active groups into a single index list
        for (int i = 0; i < A.size(); i++)
            for (int j = 0; j < g_size(A(i)); j++) ind(k++) = g_index(A(i)) + j;
        MatrixXd SA(len, len);
        // copy the lower triangle and mirror it to keep SA symmetric
        for (int i = 0; i < len; i++)
            for (int j = 0; j < i + 1; j++) {
                int di = ind(i), dj = ind(j);
                SA(i, j) = Sigma(di, dj);
                SA(j, i) = Sigma(dj, di);
            }
        return SA;
    }
    // Sample covariance of the columns of X (centered, normalized by rows - 1).
    MatrixXd compute_Sigma(T4 &X) {
        MatrixXd X1 = MatrixXd(X);
        MatrixXd centered = X1.rowwise() - X1.colwise().mean();
        return centered.adjoint() * centered / (X1.rows() - 1);
    }
};
template <class T4>
class abessRPCA : public Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, T4> {
   public:
    // Robust PCA: decompose X ~ L (low rank) + S (sparse). beta stores the values of
    // the nonzero entries of S at the column-major positions listed in the active set A;
    // L is refit by hard-impute after each splicing step.
    MatrixXd L;
    int r = 10;  // target rank of L; overwritten from lambda_level in inital_screening
    abessRPCA(int algorithm_type, int model_type, int max_iter = 30, int primary_model_fit_max_iter = 10,
              double primary_model_fit_epsilon = 1e-8, bool warm_start = true, int exchange_num = 5,
              Eigen::VectorXi always_select = Eigen::VectorXi::Zero(0), int splicing_type = 1, int sub_search = 0)
        : Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, T4>::Algorithm(
              algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon, warm_start,
              exchange_num, always_select, splicing_type, sub_search){};
    ~abessRPCA(){};
    // S has one candidate entry per matrix cell.
    int get_beta_size(int n, int p) { return n * p; }
    // NOTE(review): abessPCA spells the analogous override "updata_tau"; confirm this
    // name matches the base-class interface so the override actually takes effect.
    void update_tau(int train_n, int N) { this->tau = 0.0; }
    // Initial screening: on the first call, score every cell by |X - L| after a rank-r
    // truncated SVD, pin the user-supplied A and always_select entries, then keep the
    // sparsity_level largest scores as the initial active set.
    Eigen::VectorXi inital_screening(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &beta, double &coef0,
                                     Eigen::VectorXi &A, Eigen::VectorXi &I, Eigen::VectorXd &bd,
                                     Eigen::VectorXd &weights, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size,
                                     int &N) {
        MatrixXd S;
        if (bd.size() == 0) {
            // variable initialization
            bd = VectorXd::Zero(N);
            this->L = this->trun_svd(X);
            S = X - this->L;
            S.resize(N, 1);  // flatten (column-major) so bd indexes cells directly
            for (int i = 0; i < N; i++) bd(i) = abs(S(i, 0));
            // A_init: keep the caller-specified entries near the top of the ranking
            for (int i = 0; i < A.size(); i++) {
                bd(A(i)) = DBL_MAX / 2;
            }
            // always_select entries can never be spliced out
            for (int i = 0; i < (this->always_select).size(); i++) {
                bd(this->always_select(i)) = DBL_MAX;
            }
            // the rank of L is passed in through the lambda slot
            this->r = (int)this->lambda_level;
        }
        // get Active-set A according to max_k bd
        VectorXi A_new = max_k(bd, this->sparsity_level);
        return A_new;
    }
    // Refit L by hard-impute on the complement of A, then read the S entries off the
    // residual. If the refit does not improve the loss by more than tau, keep the old L.
    bool primary_model_fit(T4 &x, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta, double &coef0,
                           double loss0, Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size) {
        int n = x.rows();
        MatrixXd L_old = this->L;
        this->L = this->HardImpute(x, A, 1000, 1e-5);
        for (int i = 0; i < A.size(); i++) {
            // decode the column-major cell index
            int mi = A(i) % n;
            int mj = int(A(i) / n);
            beta(i) = x.coeff(mi, mj) - this->L(mi, mj);
        }
        double loss1 = this->loss_function(x, y, weights, beta, coef0, A, g_index, g_size, 0);
        if (loss0 - loss1 <= this->tau) {
            this->L = L_old;  // insufficient improvement: roll back
        }
        return true;
    };
    // Mean squared residual of X - L - S.
    double loss_function(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &beta, double &coef0,
                         Eigen::VectorXi &A, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, double lambda) {
        int n = X.rows();
        int p = X.cols();
        // MatrixXd L = this->HardImpute(X, A, 1000, 1e-5);
        MatrixXd S = compute_S(beta, A, n, p);
        MatrixXd W = X - this->L - S;
        return W.squaredNorm() / n / p;
    };
    // Splicing scores: active cells by the loss increase from dropping them
    // (S^2 + 2*S*W), inactive cells by the squared residual W^2.
    void sacrifice(T4 &X, T4 &XA, Eigen::VectorXd &y, Eigen::VectorXd &beta, Eigen::VectorXd &beta_A, double &coef0,
                   Eigen::VectorXi &A, Eigen::VectorXi &I, Eigen::VectorXd &weights, Eigen::VectorXi &g_index,
                   Eigen::VectorXi &g_size, int N, Eigen::VectorXi &A_ind, Eigen::VectorXd &bd, Eigen::VectorXi &U,
                   Eigen::VectorXi &U_ind, int num) {
        int n = X.rows();
        int p = X.cols();
        // MatrixXd L = this->HardImpute(X, A, 1000, 1e-5);
        MatrixXd S = compute_S(beta_A, A, n, p);
        MatrixXd W = X - this->L - S;
        for (int i = 0; i < A.size(); i++) {
            int mi = A(i) % n;
            int mj = int(A(i) / n);
            bd(A(i)) = S(mi, mj) * S(mi, mj) + 2 * S(mi, mj) * W(mi, mj);
        }
        for (int i = 0; i < I.size(); i++) {
            int mi = I(i) % n;
            int mj = int(I(i) / n);
            bd(I(i)) = W(mi, mj) * W(mi, mj);
        }
        return;
    };
    // Best rank-r approximation of X using Spectra's symmetric eigensolver on the
    // smaller of X'X / XX'. Only eigenpairs with positive eigenvalues are kept.
    MatrixXd trun_svd(MatrixXd X) {
        int m = X.rows(), n = X.cols(), K = this->r;
        // Fixed: Y was default-constructed (uninitialized) and returned as-is whenever
        // the eigensolver did not report success; now a zero matrix is returned instead.
        MatrixXd Y = MatrixXd::Zero(m, n);
        if (m > n) {
            MatrixXd R = X.transpose() * X;
            DenseSymMatProd<double> op_r(R);
            SymEigsSolver<DenseSymMatProd<double>> eig_r(op_r, K, 2 * K > n ? n : 2 * K);
            eig_r.init();
            eig_r.compute(SortRule::LargestAlge);
            VectorXd evalues;
            if (eig_r.info() == CompInfo::Successful) {
                evalues = eig_r.eigenvalues();
                // drop non-positive eigenvalues from the reconstruction
                int num = 0;
                for (int s = 0; s < K; s++) {
                    if (evalues(s) > 0) {
                        num++;
                    }
                }
                if (num < K) {
                    K = num;
                }
                MatrixXd vec_r = eig_r.eigenvectors(K);
                Y = X * vec_r * vec_r.transpose();  // project onto the leading right singular vectors
            }
        } else {
            MatrixXd L = X * X.transpose();
            DenseSymMatProd<double> op_l(L);
            SymEigsSolver<DenseSymMatProd<double>> eig_l(op_l, K, 2 * K > m ? m : 2 * K);
            eig_l.init();
            eig_l.compute(SortRule::LargestAlge);
            VectorXd evalues;
            if (eig_l.info() == CompInfo::Successful) {
                evalues = eig_l.eigenvalues();
                int num = 0;
                for (int s = 0; s < K; s++) {
                    if (evalues(s) > 0) {
                        num++;
                    }
                }
                if (num < K) {
                    K = num;
                }
                MatrixXd vec_l = eig_l.eigenvectors(K);
                Y = vec_l * vec_l.transpose() * X;  // project onto the leading left singular vectors
            }
        }
        return Y;
    };
    // Iterative hard-impute: alternately fill the cells in A from the current low-rank
    // estimate and re-truncate to rank r, until the relative change drops below tol.
    MatrixXd HardImpute(T4 &X, VectorXi &A, int max_it, double tol) {
        int m = X.rows(), n = X.cols();
        MatrixXd Z_old = MatrixXd::Zero(m, n);
        MatrixXd Z_new(m, n);
        MatrixXd lambda = MatrixXd::Zero(m, n);
        double eps = 1;
        int count = 0;
        while (eps > tol && count < max_it) {
            // residual with the active (sparse) cells masked out
            lambda = X - Z_old;
            for (int i = 0; i < A.size(); i++) {
                int r = A(i) % m;
                int c = int(A(i) / m);
                lambda(r, c) = 0;
            }
            Z_new = trun_svd(Z_old + lambda);
            // NOTE(review): on the first pass Z_old is zero, so this divides by 0
            // (inf keeps iterating; a 0/0 NaN exits with the first truncation) — benign
            // in practice but confirm if exact first-step behavior matters.
            eps = (Z_new - Z_old).squaredNorm() / Z_old.squaredNorm();
            Z_old = Z_new;
            count++;
        }
        return Z_new;
    }
    // Scatter the active coefficients beta back into a dense n x p sparse-component matrix.
    MatrixXd compute_S(VectorXd &beta, VectorXi &A, int n, int p) {
        MatrixXd S = MatrixXd::Zero(n, p);
        for (int i = 0; i < A.size(); i++) {
            int mi = A(i) % n;
            int mj = int(A(i) / n);
            S(mi, mj) = beta(i);
        }
        return S;
    };
};
#endif // SRC_ALGORITHMPCA_H
| 11,557 | 36.044872 | 120 | h |
abess | abess-master/src/Data.h | //
// Created by Jin Zhu on 2020/2/18.
//
// #define R_BUILD
#ifndef SRC_DATA_H
#define SRC_DATA_H
#ifdef R_BUILD
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]]
#else
#include <Eigen/Eigen>
#endif
#include <iostream>
#include <vector>
#include "normalize.h"
#include "utilities.h"
using namespace std;
using namespace Eigen;
template <class T1, class T2, class T3, class T4>
class Data {
   public:
    T4 x;
    T1 y;
    Eigen::VectorXd weight;
    Eigen::VectorXd x_mean;
    Eigen::VectorXd x_norm;
    T3 y_mean;
    int n;
    int p;
    int M;
    int normalize_type;
    int g_num;
    Eigen::VectorXi g_index;
    Eigen::VectorXi g_size;

    Data() = default;

    // Bundle one (x, y) data set together with its sample weights and the group
    // structure of the coefficients. Dense inputs may be normalized in place here;
    // sparse matrices are deliberately left untouched.
    Data(T4 &x, T1 &y, int normalize_type, Eigen::VectorXd &weight, Eigen::VectorXi &g_index, bool sparse_matrix,
         int beta_size) {
        this->x = x;
        this->y = y;
        this->normalize_type = normalize_type;
        this->weight = weight;

        this->n = x.rows();
        this->p = x.cols();
        this->M = y.cols();

        this->x_mean = Eigen::VectorXd::Zero(this->p);
        this->x_norm = Eigen::VectorXd::Zero(this->p);
        if (normalize_type > 0 && !sparse_matrix) {
            this->normalize();
        }

        // Each group's length is the gap to the next group's start index;
        // the final group extends to beta_size.
        this->g_index = g_index;
        this->g_num = g_index.size();
        this->g_size.resize(this->g_num);
        for (int i = 0; i < this->g_num; i++) {
            int next_start = (i + 1 < this->g_num) ? g_index(i + 1) : beta_size;
            this->g_size(i) = next_start - g_index(i);
        }
    };

    // Apply the normalization routine matching normalize_type
    // (implementations live in normalize.h).
    void normalize() {
        switch (this->normalize_type) {
            case 1:
                Normalize(this->x, this->y, this->weight, this->x_mean, this->y_mean, this->x_norm);
                break;
            case 2:
                Normalize3(this->x, this->weight, this->x_mean, this->x_norm);
                break;
            default:
                Normalize4(this->x, this->weight, this->x_norm);
                break;
        }
    };
};
#endif // SRC_DATA_H
| 2,313 | 21.686275 | 113 | h |
abess | abess-master/src/Metric.h | //
// Created by Jin Zhu on 2020/2/18.
//
// #define R_BUILD
#ifndef SRC_METRICS_H
#define SRC_METRICS_H
#ifdef R_BUILD
#include <Rcpp.h>
using namespace Rcpp;
#endif
#include <algorithm>
#include <random>
#include <vector>
#include "Algorithm.h"
#include "Data.h"
#include "utilities.h"
template <class T1, class T2, class T3, class T4>
// To do: calculate loss && all to one && lm poisson cox
class Metric {
public:
bool is_cv;
int Kfold;
int eval_type;
double ic_coef;
bool raise_warning = true;
// Eigen::Matrix<T2, Dynamic, 1> cv_initial_model_param;
// Eigen::Matrix<T3, Dynamic, 1> cv_initial_coef0;
// std::vector<Eigen::VectorXi> cv_initial_A;
// std::vector<Eigen::VectorXi> cv_initial_I;
std::vector<Eigen::VectorXi> train_mask_list;
std::vector<Eigen::VectorXi> test_mask_list;
std::vector<T4> train_X_list;
std::vector<T4> test_X_list;
std::vector<T1> train_y_list;
std::vector<T1> test_y_list;
std::vector<Eigen::VectorXd> train_weight_list;
std::vector<Eigen::VectorXd> test_weight_list;
std::vector<FIT_ARG<T2, T3>> cv_init_fit_arg;
// std::vector<std::vector<T4>> group_XTX_list;
Metric() = default;
Metric(int eval_type, double ic_coef = 1.0, int Kfold = 5) {
this->is_cv = Kfold > 1;
this->eval_type = eval_type;
this->Kfold = Kfold;
this->ic_coef = ic_coef;
if (is_cv) {
cv_init_fit_arg.resize(Kfold);
train_X_list.resize(Kfold);
test_X_list.resize(Kfold);
train_y_list.resize(Kfold);
test_y_list.resize(Kfold);
test_weight_list.resize(Kfold);
train_weight_list.resize(Kfold);
}
};
void set_cv_init_fit_arg(int beta_size, int M) {
for (int i = 0; i < this->Kfold; i++) {
T2 beta_init;
T3 coef0_init;
coef_set_zero(beta_size, M, beta_init, coef0_init);
Eigen::VectorXi A_init;
Eigen::VectorXd bd_init;
FIT_ARG<T2, T3> fit_arg(0, 0., beta_init, coef0_init, bd_init, A_init);
cv_init_fit_arg[i] = fit_arg;
}
}
// void set_cv_initial_model_param(int Kfold, int p)
// {
// this->cv_initial_model_param = Eigen::MatrixXd::Zero(p, Kfold);
// };
// void set_cv_initial_A(int Kfold, int p)
// {
// vector<Eigen::VectorXi> tmp(Kfold);
// this->cv_initial_A = tmp;
// };
// void set_cv_initial_coef0(int Kfold, int p)
// {
// vector<double> tmp(Kfold);
// for (int i = 0; i < Kfold; i++)
// tmp[i] = 0;
// this->cv_initial_coef0 = tmp;
// };
// void update_cv_initial_model_param(Eigen::VectorXd model_param, int k)
// {
// this->cv_initial_model_param.col(k) = model_param;
// }
// void update_cv_initial_A(Eigen::VectorXi A, int k)
// {
// this->cv_initial_A[k] = A;
// }
// void update_cv_initial_coef0(double coef0, int k)
// {
// this->cv_initial_coef0[k] = coef0;
// }
    // Partition the n samples into Kfold train/test splits and cache the sliced data
    // (X, y, weight) for each fold. When cv_fold_id is empty, folds come from a
    // fixed-seed random shuffle; otherwise samples sharing a cv_fold_id value form a fold.
    void set_cv_train_test_mask(Data<T1, T2, T3, T4> &data, int n, Eigen::VectorXi &cv_fold_id) {
        Eigen::VectorXi index_list(n);
        std::vector<int> index_vec((unsigned int)n);
        std::vector<Eigen::VectorXi> group_list((unsigned int)this->Kfold);
        for (int i = 0; i < n; i++) {
            index_vec[i] = i;
        }
        if (cv_fold_id.size() == 0) {
            // no fold ids given: shuffle with a fixed seed so the partition is reproducible
            // std::random_device rd;
            std::mt19937 g(123);
            std::shuffle(index_vec.begin(), index_vec.end(), g);
            for (int i = 0; i < n; i++) {
                index_list(i) = index_vec[i];
            }
            Eigen::VectorXd loss_list(this->Kfold);
            // equal-sized folds; the last fold absorbs the remainder
            int group_size = int(n / this->Kfold);
            for (int k = 0; k < (this->Kfold - 1); k++) {
                group_list[k] = index_list.segment(int(k * group_size), group_size);
            }
            group_list[this->Kfold - 1] =
                index_list.segment(int((this->Kfold - 1) * group_size), n - int(int(this->Kfold - 1) * group_size));
        } else {
            // given cv_fold_id: sort sample indices by fold id, then cut at id boundaries
            auto rule = [cv_fold_id](int i, int j) -> bool { return cv_fold_id(i) < cv_fold_id(j); };
            std::sort(index_vec.begin(), index_vec.end(), rule);
            for (int i = 0; i < n; i++) {
                index_list(i) = index_vec[i];
            }
            // NOTE(review): if the last fold contains exactly one sample (st == n - 1),
            // the loop below exits before assigning it — confirm fold sizes exceed 1.
            int k = 0, st = 0, ed = 1;
            while (k < this->Kfold && ed < n) {
                int mask = cv_fold_id(index_list(st));
                // advance ed to the first sample with a different fold id
                while (ed < n && mask == cv_fold_id(index_list(ed))) ed++;
                group_list[k] = index_list.segment(st, ed - st);
                st = ed;
                ed++;
                k++;
            }
        }
        for (int k = 0; k < this->Kfold; k++) {
            std::sort(group_list[k].data(), group_list[k].data() + group_list[k].size());
        }
        // cv train-test partition: each fold's train mask is the union of the other folds
        std::vector<Eigen::VectorXi> train_mask_list_tmp((unsigned int)this->Kfold);
        std::vector<Eigen::VectorXi> test_mask_list_tmp((unsigned int)this->Kfold);
        for (int k = 0; k < this->Kfold; k++) {
            int train_x_size = n - group_list[k].size();
            // get train_mask
            Eigen::VectorXi train_mask(train_x_size);
            int i = 0;
            for (int j = 0; j < this->Kfold; j++) {
                if (j != k) {
                    for (int s = 0; s < group_list[j].size(); s++) {
                        train_mask(i) = group_list[j](s);
                        i++;
                    }
                }
            }
            std::sort(train_mask.data(), train_mask.data() + train_mask.size());
            train_mask_list_tmp[k] = train_mask;
            test_mask_list_tmp[k] = group_list[k];
            // materialize the per-fold data slices once, up front
            slice(data.x, train_mask, this->train_X_list[k]);
            slice(data.x, group_list[k], this->test_X_list[k]);
            slice(data.y, train_mask, this->train_y_list[k]);
            slice(data.y, group_list[k], this->test_y_list[k]);
            slice(data.weight, train_mask, this->train_weight_list[k]);
            slice(data.weight, group_list[k], this->test_weight_list[k]);
        }
        this->train_mask_list = train_mask_list_tmp;
        this->test_mask_list = test_mask_list_tmp;
    };
// void cal_cv_group_XTX(Data<T1, T2, T3> &data)
// {
// int p = data.p;
// Eigen::VectorXi index = data.g_index;
// Eigen::VectorXi gsize = data.g_size;
// int N = data.g_num;
// std::vector<std::vector<Eigen::MatrixXd>> group_XTX_list_tmp(this->Kfold);
// for (int k = 0; k < this->Kfold; k++)
// {
// int train_size = this->train_mask_list[k].size();
// Eigen::MatrixXd train_x(train_size, p);
// for (int i = 0; i < train_size; i++)
// {
// train_x.row(i) = data.x.row(this->train_mask_list[k](i));
// };
// group_XTX_list_tmp[k] = group_XTX(train_x, index, gsize, train_size, p, N, 1);
// }
// this->group_XTX_list = group_XTX_list_tmp;
// }
    // Information criterion for non-CV model selection. The base "loss" removes the
    // ridge penalty from the training loss first; model types 1/5 use n * log(loss)
    // (Gaussian-style), others 2 * loss (deviance-style). eval_type selects the
    // complexity penalty; unknown types fall back to the bare loss with a one-time warning.
    double ic(int train_n, int M, int N, Algorithm<T1, T2, T3, T4> *algorithm) {
        // information criterion: for non-CV
        double loss;
        if (algorithm->model_type == 1 || algorithm->model_type == 5) {
            loss = train_n *
                   log(algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum());
        } else {
            loss = 2 * (algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum());
        }

        // 0. only loss
        if (this->eval_type == 0) {
            return loss;
        }
        // 1. AIC
        if (this->eval_type == 1) {
            return loss + 2.0 * algorithm->get_effective_number();
        }
        // 2. BIC
        if (this->eval_type == 2) {
            return loss + this->ic_coef * log(double(train_n)) * algorithm->get_effective_number();
        }
        // 3. GIC
        if (this->eval_type == 3) {
            return loss +
                   this->ic_coef * log(double(N)) * log(log(double(train_n))) * algorithm->get_effective_number();
        }
        // 4. EBIC
        if (this->eval_type == 4) {
            return loss +
                   this->ic_coef * (log(double(train_n)) + 2 * log(double(N))) * algorithm->get_effective_number();
        }
        // 5. HIC (uses the raw penalty-free loss scaled by n, not the transformed one)
        if (this->eval_type == 5) {
            return train_n *
                       (algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum()) +
                   this->ic_coef * log(double(N)) * log(log(double(train_n))) * algorithm->get_effective_number();
        }

        // unknown eval_type: warn once, then silently use the loss
        if (this->raise_warning) {
#ifdef R_BUILD
            Rcout << "[warning] No available IC type for training. Use loss instead. "
                  << "(E" << this->eval_type << "M" << algorithm->model_type << ")" << endl;
#else
            cout << "[warning] No available IC type for training. Use loss instead. "
                 << "(E" << this->eval_type << "M" << algorithm->model_type << ")" << endl;
#endif
            this->raise_warning = false;
        }
        // return 0;
        return loss;
    };
    // Held-out evaluation for cross-validation. Restricts the fitted model to its
    // active set, then returns either the plain test loss (eval_type 0 or any
    // unsupported combination, with a one-time warning) or a negative AUC variant:
    // eval_type 1 for logistic (model 2), 2 (one-vs-one) / 3 (one-vs-rest) for
    // multinomial (model 6). Lower is better in all cases.
    double test_loss(T4 &test_x, T1 &test_y, Eigen::VectorXd &test_weight, Eigen::VectorXi &g_index,
                     Eigen::VectorXi &g_size, int test_n, int p, int N, Algorithm<T1, T2, T3, T4> *algorithm) {
        // test loss: for CV
        Eigen::VectorXi A = algorithm->get_A_out();
        T2 beta = algorithm->get_beta();
        T3 coef0 = algorithm->get_coef0();

        // restrict X and beta to the active coefficients
        Eigen::VectorXi A_ind = find_ind(A, g_index, g_size, beta.rows(), N);
        T4 test_X_A = X_seg(test_x, test_n, A_ind, algorithm->model_type);

        T2 beta_A;
        slice(beta, A_ind, beta_A);

        // 0. only test loss
        if (this->eval_type == 0) {
            return algorithm->loss_function(test_X_A, test_y, test_weight, beta_A, coef0, A, g_index, g_size,
                                            algorithm->lambda_level);
        }
        // 1. negative AUC (for logistic)
        if (this->eval_type == 1 && algorithm->model_type == 2) {
            // compute probability via the logistic transform of the linear predictor
            Eigen::VectorXd test_y_temp = test_y;
            Eigen::VectorXd proba = test_X_A * beta_A + coef0 * Eigen::VectorXd::Ones(test_n);
            proba = proba.array().exp();
            proba = proba.cwiseQuotient(Eigen::VectorXd::Ones(test_n) + proba);
            return -this->binary_auc_score(test_y_temp, proba);
        }
        // 2. 3. negative AUC, One vs One/Rest (for multinomial)
        if (algorithm->model_type == 6) {
            int M = test_y.cols();
            // compute probability: softmax over the M class scores
            // NOTE(review): the normalization and averaging below use p (feature count)
            // where the class count M appears intended — confirm.
            Eigen::MatrixXd proba = test_X_A * beta_A;
            proba = rowwise_add(proba, coef0);
            proba = proba.array().exp();
            Eigen::VectorXd proba_rowsum = proba.rowwise().sum();
            proba = proba.cwiseQuotient(proba_rowsum.replicate(1, p));
            // compute AUC
            if (this->eval_type == 2) {
                // (One vs One) the AUC of all possible pairwise combinations of classes
                double auc = 0;
                for (int i = 0; i < M - 1; i++) {
                    for (int j = i + 1; j < M; j++) {
                        int nij = 0;
                        Eigen::VectorXd test_y_i(test_n), test_y_j(test_n), proba_i(test_n), proba_j(test_n);
                        // extract samples who belongs to class i or j
                        for (int k = 0; k < test_n; k++) {
                            if (test_y(k, i) + test_y(k, j) > 0) {
                                test_y_i(nij) = test_y(k, i);
                                test_y_j(nij) = test_y(k, j);
                                proba_i(nij) = proba(k, i);
                                proba_j(nij) = proba(k, j);
                                nij++;
                            }
                        }
                        // shrink the buffers to the samples actually collected
                        test_y_i = test_y_i.head(nij).eval();
                        test_y_j = test_y_j.head(nij).eval();
                        proba_i = proba_i.head(nij).eval();
                        proba_j = proba_j.head(nij).eval();
                        // get AUC in both directions of the pair
                        auc += this->binary_auc_score(test_y_i, proba_i);
                        auc += this->binary_auc_score(test_y_j, proba_j);
                    }
                }
                return -auc / (p * (p - 1));
            }
            if (this->eval_type == 3) {
                // (One vs Rest) the AUC of each class against the rest
                double auc = 0;
                for (int i = 0; i < M; i++) {
                    Eigen::VectorXd test_y_single = test_y.col(i);
                    Eigen::VectorXd proba_single = proba.col(i);
                    auc += this->binary_auc_score(test_y_single, proba_single);
                }
                return -auc / p;
            }
        }

        // unsupported eval_type/model_type pairing: warn once, fall back to the loss
        if (this->raise_warning) {
#ifdef R_BUILD
            Rcout << "[warning] No available CV score for training. Use test_loss instead. "
                  << "(E" << this->eval_type << "M" << algorithm->model_type << ")" << endl;
#else
            cout << "[warning] No available CV score for training. Use test_loss instead. "
                 << "(E" << this->eval_type << "M" << algorithm->model_type << ")" << endl;
#endif
            this->raise_warning = false;
        }
        // return 0;
        return algorithm->loss_function(test_X_A, test_y, test_weight, beta_A, coef0, A, g_index, g_size,
                                        algorithm->lambda_level);
    };
double binary_auc_score(Eigen::VectorXd &true_label, Eigen::VectorXd &pred_proba) {
// sort proba from large to small
int n = true_label.rows();
Eigen::VectorXi sort_ind = max_k(pred_proba, n, true);
// use each value as threshold to get TPR, FPR
double tp = 0, fp = 0, positive = true_label.sum();
double last_tpr = 0, last_fpr = 0, auc = 0;
if (positive == 0 || positive == n) {
#ifdef R_BUILD
Rcout << "[Warning] There is only one class in the test data, "
<< "the result may be meaningless. Please use another type of loss, "
<< "or try to specify cv_fold_id." << endl;
#else
cout << "[Warning] There is only one class in the test data, "
<< "the result may be meaningless. Please use another type of loss, "
<< "or try to specify cv_fold_id." << endl;
#endif
}
for (int i = 0; i < n; i++) {
// current threshold: pred_proba(sort_ind(i))
int k = sort_ind(i);
tp += true_label(k);
fp += 1 - true_label(k);
// skip same threshold
if (i < n - 1) {
int kk = sort_ind(i + 1);
if (pred_proba(k) == pred_proba(kk)) continue;
}
// compute tpr, fpr
double tpr = tp / positive;
double fpr = fp / (n - positive);
if (fpr > last_fpr) {
auc += (tpr + last_tpr) / 2 * (fpr - last_fpr);
}
last_tpr = tpr;
last_fpr = fpr;
}
return auc;
};
    // Fit the model(s) for one candidate (support_size, lambda) pair and
    // return the evaluation score(s).
    //
    // Without cross-validation, algorithm_list[0] is fitted on the full data
    // and entry 0 of the returned vector holds the information criterion;
    // warm-start coefficients are written back into `fit_arg` for the next
    // candidate. With cross-validation, the Kfold folds are fitted in
    // parallel (one Algorithm object per fold) and entry k holds the test
    // loss of fold k; warm starts are kept per fold in `cv_init_fit_arg[k]`.
    // TODO(review): remove or revive the commented-out legacy slicing code.
    Eigen::VectorXd fit_and_evaluate_in_metric(std::vector<Algorithm<T1, T2, T3, T4> *> algorithm_list,
                                               Data<T1, T2, T3, T4> &data, FIT_ARG<T2, T3> &fit_arg) {
        Eigen::VectorXd loss_list(this->Kfold);
        if (!is_cv) {
            // push the current warm-start state into the solver
            algorithm_list[0]->update_sparsity_level(fit_arg.support_size);
            algorithm_list[0]->update_lambda_level(fit_arg.lambda);
            algorithm_list[0]->update_beta_init(fit_arg.beta_init);
            algorithm_list[0]->update_bd_init(fit_arg.bd_init);
            algorithm_list[0]->update_coef0_init(fit_arg.coef0_init);
            algorithm_list[0]->update_A_init(fit_arg.A_init, data.g_num);
            algorithm_list[0]->fit(data.x, data.y, data.weight, data.g_index, data.g_size, data.n, data.p, data.g_num);
            if (algorithm_list[0]->get_warm_start()) {
                // carry the fitted coefficients over to the next candidate
                fit_arg.beta_init = algorithm_list[0]->get_beta();
                fit_arg.coef0_init = algorithm_list[0]->get_coef0();
                fit_arg.bd_init = algorithm_list[0]->get_bd();
            }
            loss_list(0) = this->ic(data.n, data.M, data.g_num, algorithm_list[0]);
        } else {
            Eigen::VectorXi g_index = data.g_index;
            Eigen::VectorXi g_size = data.g_size;
            int p = data.p;
            int N = data.g_num;
#pragma omp parallel for
            // parallel
            for (int k = 0; k < this->Kfold; k++) {
                // get test_x, test_y
                int test_n = this->test_mask_list[k].size();
                int train_n = this->train_mask_list[k].size();
                // train & test data
                // Eigen::MatrixXd train_x = matrix_slice(data.x, this->train_mask_list[k], 0);
                // Eigen::MatrixXd test_x = matrix_slice(data.x, this->test_mask_list[k], 0);
                // Eigen::VectorXd train_y = vector_slice(data.y, this->train_mask_list[k]);
                // Eigen::VectorXd test_y = vector_slice(data.y, this->test_mask_list[k]);
                // Eigen::VectorXd train_weight = vector_slice(data.weight, this->train_mask_list[k]);
                // Eigen::VectorXd test_weight = vector_slice(data.weight, this->test_mask_list[k]);
                // Eigen::VectorXd beta_init;
                // per-fold warm start: each fold keeps its own initial values
                algorithm_list[k]->update_sparsity_level(fit_arg.support_size);
                algorithm_list[k]->update_lambda_level(fit_arg.lambda);
                algorithm_list[k]->update_beta_init(this->cv_init_fit_arg[k].beta_init);
                algorithm_list[k]->update_bd_init(this->cv_init_fit_arg[k].bd_init);
                algorithm_list[k]->update_coef0_init(this->cv_init_fit_arg[k].coef0_init);
                algorithm_list[k]->update_A_init(this->cv_init_fit_arg[k].A_init, N);
                // beta_init = this->cv_initial_model_param.col(k).eval();
                // algorithm->update_beta_init(beta_init);
                // algorithm->update_coef0_init(this->cv_initial_coef0[k]);
                // algorithm->update_A_init(this->cv_initial_A[k], N);
                // algorithm->update_train_mask(this->train_mask_list[k]);
                // ??????????????????????????????????????????????????????????????
                algorithm_list[k]->fit(this->train_X_list[k], this->train_y_list[k], this->train_weight_list[k],
                                       g_index, g_size, train_n, p, N);
                if (algorithm_list[k]->get_warm_start()) {
                    this->cv_init_fit_arg[k].beta_init = algorithm_list[k]->get_beta();
                    this->cv_init_fit_arg[k].coef0_init = algorithm_list[k]->get_coef0();
                    this->cv_init_fit_arg[k].bd_init = algorithm_list[k]->get_bd();
                    // this->update_cv_initial_model_param(algorithm->get_beta(), k);
                    // this->update_cv_initial_A(algorithm->get_A_out(), k);
                    // this->update_cv_initial_coef0(algorithm->get_coef0(), k);
                }
                loss_list(k) = this->test_loss(this->test_X_list[k], this->test_y_list[k], this->test_weight_list[k],
                                               g_index, g_size, test_n, p, N, algorithm_list[k]);
            }
        }
        return loss_list;
    };
};
#endif // SRC_METRICS_H
| 19,691 | 39.68595 | 119 | h |
abess | abess-master/src/abessOpenMP.h | #ifndef SRC_ABESSOPENMP_H
#define SRC_ABESSOPENMP_H
#ifdef _OPENMP
#include <omp.h>
// [[Rcpp::plugins(openmp)]]
#else
#ifndef DISABLE_OPENMP
#ifndef R_BUILD
// use pragma message instead of warning
#pragma message( \
"Warning: OpenMP is not available, " \
"project will be compiled into single-thread code. " \
"Use OpenMP-enabled compiler to get benefit of multi-threading.")
#endif
#endif
// Single-threaded fallbacks used when OpenMP is unavailable: they mimic the
// OpenMP runtime API so the calling code compiles unchanged, reporting exactly
// one thread (id 0) and silently ignoring any thread-count configuration.
inline int omp_get_thread_num() { return 0; }
inline int omp_get_num_threads() { return 1; }
inline int omp_get_num_procs() { return 1; }
inline void omp_set_num_threads(int nthread) {}
inline void omp_set_dynamic(int flag) {}
#endif
#endif // SRC_ABESSOPENMP_H
| 725 | 28.04 | 69 | h |
abess | abess-master/src/api.cpp | // #define R_BUILD
#ifdef R_BUILD
#include <Rcpp.h>
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]]
using namespace Rcpp;
#else
#include <Eigen/Eigen>
#include "List.h"
#endif
#include <iostream>
#include <vector>
#include "Algorithm.h"
#include "AlgorithmGLM.h"
#include "AlgorithmPCA.h"
#include "utilities.h"
#include "workflow.h"
typedef Eigen::Triplet<double> triplet;
using namespace Eigen;
using namespace std;
// Entry point for all generalized-linear-model variants of abess
// (model_type: 1 Lm, 2 Logistic, 3 Poisson, 4 Cox, 5 MLm, 6 Multinomial,
// 8 Gamma, 9 Ordinal). Builds one Algorithm object per thread/fold for the
// requested model and matrix storage (dense or sparse), delegates the whole
// parameter search to abessWorkflow(), and returns its result List.
// NOTE: when sparse_matrix is true, each row of `x` is a triplet
// (value, row index, column index) — see the insert loop below.
// [[Rcpp::export]]
List abessGLM_API(Eigen::MatrixXd x, Eigen::MatrixXd y, int n, int p, int normalize_type, Eigen::VectorXd weight,
                  int algorithm_type, int model_type, int max_iter, int exchange_num, int path_type, bool is_warm_start,
                  int ic_type, double ic_coef, int Kfold, Eigen::VectorXi sequence, Eigen::VectorXd lambda_seq,
                  int s_min, int s_max, double lambda_min, double lambda_max, int nlambda, int screening_size,
                  Eigen::VectorXi g_index, Eigen::VectorXi always_select, int primary_model_fit_max_iter,
                  double primary_model_fit_epsilon, bool early_stop, bool approximate_Newton, int thread,
                  bool covariance_update, bool sparse_matrix, int splicing_type, int sub_search,
                  Eigen::VectorXi cv_fold_id, Eigen::VectorXi A_init, bool fit_intercept) {
#ifdef _OPENMP
    // Eigen::initParallel();
    // cap the requested thread count at what the hardware offers
    int max_thread = omp_get_max_threads();
    if (thread == 0 || thread > max_thread) {
        thread = max_thread;
    }
    Eigen::setNbThreads(thread);
    omp_set_num_threads(thread);
#endif
    // one Algorithm object per worker: max(thread, Kfold) so that both the
    // parallel path search and parallel CV always have a private solver
    int algorithm_list_size = max(thread, Kfold);
    vector<Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd> *> algorithm_list_uni_dense(
        algorithm_list_size);
    vector<Algorithm<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::MatrixXd> *> algorithm_list_mul_dense(
        algorithm_list_size);
    vector<Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double>> *>
        algorithm_list_uni_sparse(algorithm_list_size);
    vector<Algorithm<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::SparseMatrix<double>> *>
        algorithm_list_mul_sparse(algorithm_list_size);

    // instantiate the concrete solver matching (model_type, storage type);
    // only one of the four lists is populated, the others stay nullptr
    for (int i = 0; i < algorithm_list_size; i++) {
        if (!sparse_matrix) {
            if (model_type == 1) {
                abessLm<Eigen::MatrixXd> *temp = new abessLm<Eigen::MatrixXd>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->covariance_update = covariance_update;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_dense[i] = temp;
            } else if (model_type == 2) {
                abessLogistic<Eigen::MatrixXd> *temp = new abessLogistic<Eigen::MatrixXd>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_dense[i] = temp;
            } else if (model_type == 3) {
                abessPoisson<Eigen::MatrixXd> *temp = new abessPoisson<Eigen::MatrixXd>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_dense[i] = temp;
            } else if (model_type == 4) {
                abessCox<Eigen::MatrixXd> *temp = new abessCox<Eigen::MatrixXd>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_dense[i] = temp;
            } else if (model_type == 5) {
                abessMLm<Eigen::MatrixXd> *temp = new abessMLm<Eigen::MatrixXd>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->covariance_update = covariance_update;
                temp->fit_intercept = fit_intercept;
                algorithm_list_mul_dense[i] = temp;
            } else if (model_type == 6) {
                abessMultinomial<Eigen::MatrixXd> *temp = new abessMultinomial<Eigen::MatrixXd>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_mul_dense[i] = temp;
            } else if (model_type == 8) {
                abessGamma<Eigen::MatrixXd> *temp = new abessGamma<Eigen::MatrixXd>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_dense[i] = temp;
            } else if (model_type == 9) {
                abessOrdinal<Eigen::MatrixXd> *temp = new abessOrdinal<Eigen::MatrixXd>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->fit_intercept = fit_intercept;
                algorithm_list_mul_dense[i] = temp;
            }
        } else {
            if (model_type == 1) {
                abessLm<Eigen::SparseMatrix<double>> *temp = new abessLm<Eigen::SparseMatrix<double>>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->covariance_update = covariance_update;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_sparse[i] = temp;
            } else if (model_type == 2) {
                abessLogistic<Eigen::SparseMatrix<double>> *temp = new abessLogistic<Eigen::SparseMatrix<double>>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_sparse[i] = temp;
            } else if (model_type == 3) {
                abessPoisson<Eigen::SparseMatrix<double>> *temp = new abessPoisson<Eigen::SparseMatrix<double>>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_sparse[i] = temp;
            } else if (model_type == 4) {
                abessCox<Eigen::SparseMatrix<double>> *temp = new abessCox<Eigen::SparseMatrix<double>>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_sparse[i] = temp;
            } else if (model_type == 5) {
                abessMLm<Eigen::SparseMatrix<double>> *temp = new abessMLm<Eigen::SparseMatrix<double>>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->covariance_update = covariance_update;
                temp->fit_intercept = fit_intercept;
                algorithm_list_mul_sparse[i] = temp;
            } else if (model_type == 6) {
                abessMultinomial<Eigen::SparseMatrix<double>> *temp = new abessMultinomial<Eigen::SparseMatrix<double>>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_mul_sparse[i] = temp;
            } else if (model_type == 8) {
                abessGamma<Eigen::SparseMatrix<double>> *temp = new abessGamma<Eigen::SparseMatrix<double>>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->approximate_Newton = approximate_Newton;
                temp->fit_intercept = fit_intercept;
                algorithm_list_uni_sparse[i] = temp;
            } else if (model_type == 9) {
                abessOrdinal<Eigen::SparseMatrix<double>> *temp = new abessOrdinal<Eigen::SparseMatrix<double>>(
                    algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                    is_warm_start, exchange_num, always_select, splicing_type, sub_search);
                temp->fit_intercept = fit_intercept;
                algorithm_list_mul_sparse[i] = temp;
            }
        }
    }

    // suppose X has been centered for no-intercept model
    if (normalize_type > 0 && !fit_intercept) normalize_type = 3;

    // parameter list
    Parameters parameters(sequence, lambda_seq, s_min, s_max);

    // dispatch on storage and response shape; single-response models with a
    // one-column y use the vector-typed workflow, otherwise the matrix one
    List out_result;
    if (!sparse_matrix) {
        if (y.cols() == 1 && model_type != 5 && model_type != 6) {
            Eigen::VectorXd y_vec = y.col(0).eval();

            out_result = abessWorkflow<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd>(
                x, y_vec, n, p, normalize_type, weight, algorithm_type, path_type, is_warm_start, ic_type, ic_coef,
                Kfold, parameters, screening_size, g_index, early_stop, thread, sparse_matrix, cv_fold_id, A_init,
                algorithm_list_uni_dense);
        } else {
            out_result = abessWorkflow<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::MatrixXd>(
                x, y, n, p, normalize_type, weight, algorithm_type, path_type, is_warm_start, ic_type, ic_coef, Kfold,
                parameters, screening_size, g_index, early_stop, thread, sparse_matrix, cv_fold_id, A_init,
                algorithm_list_mul_dense);
        }
    } else {
        // rebuild the sparse design matrix from the (value, row, col) triplets
        Eigen::SparseMatrix<double> sparse_x(n, p);

        // std::vector<triplet> tripletList;
        // tripletList.reserve(x.rows());
        // for (int i = 0; i < x.rows(); i++)
        // {
        //   tripletList.push_back(triplet(int(x(i, 1)), int(x(i, 2)), x(i, 0)));
        // }
        // sparse_x.setFromTriplets(tripletList.begin(), tripletList.end());

        sparse_x.reserve(x.rows());
        for (int i = 0; i < x.rows(); i++) {
            sparse_x.insert(int(x(i, 1)), int(x(i, 2))) = x(i, 0);
        }
        sparse_x.makeCompressed();

        if (y.cols() == 1 && model_type != 5 && model_type != 6) {
            Eigen::VectorXd y_vec = y.col(0).eval();

            out_result = abessWorkflow<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double>>(
                sparse_x, y_vec, n, p, normalize_type, weight, algorithm_type, path_type, is_warm_start, ic_type,
                ic_coef, Kfold, parameters, screening_size, g_index, early_stop, thread, sparse_matrix, cv_fold_id,
                A_init, algorithm_list_uni_sparse);
        } else {
            out_result = abessWorkflow<Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::SparseMatrix<double>>(
                sparse_x, y, n, p, normalize_type, weight, algorithm_type, path_type, is_warm_start, ic_type, ic_coef,
                Kfold, parameters, screening_size, g_index, early_stop, thread, sparse_matrix, cv_fold_id, A_init,
                algorithm_list_mul_sparse);
        }
    }

    // unpopulated lists hold nullptr, for which delete is a no-op
    for (int i = 0; i < algorithm_list_size; i++) {
        delete algorithm_list_uni_dense[i];
        delete algorithm_list_mul_dense[i];
        delete algorithm_list_uni_sparse[i];
        delete algorithm_list_mul_sparse[i];
    }
    return out_result;
};
// Entry point for sparse principal component analysis (model_type == 7).
// Extracts `pca_num` sparse loading vectors one at a time: after each
// component is found, the data matrix (under cross-validation) or the
// covariance matrix `sigma` (otherwise) is deflated before searching for the
// next component. Column j of `sequence` flags the candidate support sizes
// for component j. When sparse_matrix is true, each row of `x` is a triplet
// (value, row index, column index).
// [[Rcpp::export]]
List abessPCA_API(Eigen::MatrixXd x, int n, int p, int normalize_type, Eigen::VectorXd weight, Eigen::MatrixXd sigma,
                  int max_iter, int exchange_num, int path_type, bool is_warm_start, int ic_type, double ic_coef,
                  int Kfold, Eigen::MatrixXi sequence, int s_min, int s_max, int screening_size,
                  Eigen::VectorXi g_index, Eigen::VectorXi always_select, bool early_stop, int thread,
                  bool sparse_matrix, int splicing_type, int sub_search, Eigen::VectorXi cv_fold_id, int pca_num,
                  Eigen::VectorXi A_init) {
    /* this function for abessPCA only (model_type == 7) */
#ifdef _OPENMP
    // Eigen::initParallel();
    // cap the requested thread count at what the hardware offers
    int max_thread = omp_get_max_threads();
    if (thread == 0 || thread > max_thread) {
        thread = max_thread;
    }
    Eigen::setNbThreads(thread);
    omp_set_num_threads(thread);
#endif

    // fixed settings for the PCA problem
    int model_type = 7, algorithm_type = 6;
    Eigen::VectorXd lambda_seq = Eigen::VectorXd::Zero(1);
    int lambda_min = 0, lambda_max = 0, nlambda = 100;
    int primary_model_fit_max_iter = 1;
    double primary_model_fit_epsilon = 1e-3;
    int pca_n = -1;
    sub_search = 0;

    // if the caller passed an effective sample size differing from x.rows(),
    // remember it in pca_n and use the actual row count for the workflow
    if (!sparse_matrix && n != x.rows()) {
        pca_n = n;
        n = x.rows();
    }

    // PCA is unsupervised: the workflow still expects a response, so pass zeros
    Eigen::VectorXd y_vec = Eigen::VectorXd::Zero(n);

    //////////////////// function generate_algorithm_pointer() ////////////////////////////
    // one solver object per worker thread / CV fold
    int algorithm_list_size = max(thread, Kfold);
    vector<Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd> *> algorithm_list_uni_dense(
        algorithm_list_size);
    vector<Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double>> *>
        algorithm_list_uni_sparse(algorithm_list_size);

    for (int i = 0; i < algorithm_list_size; i++) {
        if (!sparse_matrix) {
            abessPCA<Eigen::MatrixXd> *temp = new abessPCA<Eigen::MatrixXd>(
                algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                is_warm_start, exchange_num, always_select, splicing_type, sub_search);
            temp->is_cv = Kfold > 1;
            temp->pca_n = pca_n;
            temp->sigma = sigma;
            algorithm_list_uni_dense[i] = temp;
        } else {
            abessPCA<Eigen::SparseMatrix<double>> *temp = new abessPCA<Eigen::SparseMatrix<double>>(
                algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                is_warm_start, exchange_num, always_select, splicing_type, sub_search);
            temp->is_cv = Kfold > 1;
            temp->pca_n = pca_n;
            temp->sigma = sigma;
            algorithm_list_uni_sparse[i] = temp;
        }
    }

    // call `abessWorkflow` for result
#ifdef R_BUILD
    List out_result(pca_num);
#else
    List out_result;
#endif
    List out_result_next;
    int num = 0;
    if (!sparse_matrix) {
        // one pass of the workflow per principal component
        while (num++ < pca_num) {
            int pca_support_size_num = sequence.col(num - 1).sum();
            Eigen::VectorXi pca_support_size(pca_support_size_num);
            // map sequence matrix to support.size
            int non_zero_num = 0;
            for (int i = 0; i < sequence.rows(); i++) {
                if (sequence(i, num - 1) == 1) {
                    pca_support_size(non_zero_num++) = i + 1;
                }
            }
            // parameter list
            Parameters parameters(pca_support_size, lambda_seq, s_min, s_max);

            out_result_next = abessWorkflow<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd>(
                x, y_vec, n, p, normalize_type, weight, algorithm_type, path_type, is_warm_start, ic_type, ic_coef,
                Kfold, parameters, screening_size, g_index, early_stop, thread, sparse_matrix, cv_fold_id, A_init,
                algorithm_list_uni_dense);
            Eigen::VectorXd beta_next;
#ifdef R_BUILD
            beta_next = out_result_next["beta"];
#else
            out_result_next.get_value_by_name("beta", beta_next);
#endif
            // collect this component's result into the output container
            if (num == 1) {
#ifdef R_BUILD
                if (pca_num > 1) {
                    out_result(0) = out_result_next;
                } else {
                    out_result = out_result_next;
                }
#else
                out_result = out_result_next;
#endif
            } else {
#ifdef R_BUILD
                // Eigen::MatrixXd beta_new(p, num);
                // Eigen::VectorXd temp = out_result["beta"];
                // Eigen::Map<Eigen::MatrixXd> temp2(temp.data(), p, num - 1);
                // beta_new << temp2, beta_next;
                // out_result["beta"] = beta_new;
                out_result(num - 1) = out_result_next;
#else
                out_result.combine_beta(beta_next);
#endif
            }

            // deflate before searching for the next component
            if (num < pca_num) {
                Eigen::MatrixXd temp = beta_next * beta_next.transpose();
                if (Kfold > 1) {
                    // CV works on the data: project out the found direction
                    x -= x * temp;
                } else {
                    // otherwise deflate the covariance matrix and push the
                    // updated sigma into every solver object
                    Eigen::MatrixXd temp1 = temp * sigma;
                    sigma += temp1 * temp - temp1 - temp1.transpose();
                    for (int i = 0; i < algorithm_list_size; i++) {
                        abessPCA<Eigen::MatrixXd> *pca_model =
                            dynamic_cast<abessPCA<Eigen::MatrixXd> *>(algorithm_list_uni_dense[i]);
                        if (pca_model) {
                            // cout << "update Sigma"<<endl;
                            pca_model->sigma = sigma;
                        }
                    }
                }
            }
        }
    } else {
        // rebuild the sparse design matrix from the (value, row, col) triplets
        Eigen::SparseMatrix<double> sparse_x(n, p);

        // std::vector<triplet> tripletList;
        // tripletList.reserve(x.rows());
        // for (int i = 0; i < x.rows(); i++)
        // {
        //   tripletList.push_back(triplet(int(x(i, 1)), int(x(i, 2)), x(i, 0)));
        // }
        // sparse_x.setFromTriplets(tripletList.begin(), tripletList.end());

        sparse_x.reserve(x.rows());
        for (int i = 0; i < x.rows(); i++) {
            sparse_x.insert(int(x(i, 1)), int(x(i, 2))) = x(i, 0);
        }
        sparse_x.makeCompressed();

        // one pass of the workflow per principal component
        while (num++ < pca_num) {
            int pca_support_size_num = sequence.col(num - 1).sum();
            Eigen::VectorXi pca_support_size(pca_support_size_num);
            // map sequence matrix to support.size
            int non_zero_num = 0;
            for (int i = 0; i < sequence.rows(); i++) {
                if (sequence(i, num - 1) == 1) {
                    pca_support_size(non_zero_num++) = i + 1;
                }
            }
            // parameter list
            Parameters parameters(pca_support_size, lambda_seq, s_min, s_max);

            out_result_next = abessWorkflow<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double>>(
                sparse_x, y_vec, n, p, normalize_type, weight, algorithm_type, path_type, is_warm_start, ic_type,
                ic_coef, Kfold, parameters, screening_size, g_index, early_stop, thread, sparse_matrix, cv_fold_id,
                A_init, algorithm_list_uni_sparse);
            Eigen::VectorXd beta_next;
#ifdef R_BUILD
            beta_next = out_result_next["beta"];
#else
            out_result_next.get_value_by_name("beta", beta_next);
#endif
            // collect this component's result into the output container
            if (num == 1) {
#ifdef R_BUILD
                if (pca_num > 1) {
                    out_result(0) = out_result_next;
                } else {
                    out_result = out_result_next;
                }
#else
                out_result = out_result_next;
#endif
            } else {
#ifdef R_BUILD
                // Eigen::MatrixXd beta_new(p, num);
                // Eigen::VectorXd temp = out_result["beta"];
                // Eigen::Map<Eigen::MatrixXd> temp2(temp.data(), p, num - 1);
                // beta_new << temp2, beta_next;
                // out_result["beta"] = beta_new;
                out_result(num - 1) = out_result_next;
#else
                out_result.combine_beta(beta_next);
#endif
            }

            // update for next PCA
            if (num < pca_num) {
                Eigen::MatrixXd temp = beta_next * beta_next.transpose();
                if (Kfold > 1) {
                    // CV works on the data: project out the found direction
                    sparse_x = sparse_x - sparse_x * temp;
                } else {
                    // otherwise deflate the covariance matrix and push the
                    // updated sigma into every solver object
                    Eigen::MatrixXd temp1 = temp * sigma;
                    sigma += temp1 * temp - temp1 - temp1.transpose();
                    for (int i = 0; i < algorithm_list_size; i++) {
                        abessPCA<Eigen::SparseMatrix<double>> *pca_model =
                            dynamic_cast<abessPCA<Eigen::SparseMatrix<double>> *>(algorithm_list_uni_sparse[i]);
                        if (pca_model) {
                            // cout << "update Sigma"<<endl;
                            pca_model->sigma = sigma;
                        }
                    }
                }
            }
        }
    }

    // unpopulated lists hold nullptr, for which delete is a no-op
    for (int i = 0; i < algorithm_list_size; i++) {
        delete algorithm_list_uni_dense[i];
        delete algorithm_list_uni_sparse[i];
    }
    return out_result;
};
// Entry point for robust PCA (model_type == 10): decomposes `x` into a
// low-rank part, whose rank is given by lambda_seq(0), plus a sparse part
// chosen by best-subset search over `sequence`. Cross-validation and
// normalization are disabled for this model. When sparse_matrix is true,
// each row of `x` is a triplet (value, row index, column index).
// [[Rcpp::export]]
List abessRPCA_API(Eigen::MatrixXd x, int n, int p, int max_iter, int exchange_num, int path_type, bool is_warm_start,
                   int ic_type, double ic_coef, Eigen::VectorXi sequence,
                   Eigen::VectorXd lambda_seq,  // rank of L
                   int s_min, int s_max, double lambda_min, double lambda_max, int nlambda, int screening_size,
                   int primary_model_fit_max_iter, double primary_model_fit_epsilon, Eigen::VectorXi g_index,
                   Eigen::VectorXi always_select, bool early_stop, int thread, bool sparse_matrix, int splicing_type,
                   int sub_search, Eigen::VectorXi A_init) {
#ifdef _OPENMP
    // Eigen::initParallel();
    // cap the requested thread count at what the hardware offers
    int max_thread = omp_get_max_threads();
    if (thread == 0 || thread > max_thread) {
        thread = max_thread;
    }
    Eigen::setNbThreads(thread);
    omp_set_num_threads(thread);
#endif

    // fixed settings for the RPCA problem: no CV, no normalization,
    // unit weights, and a dummy zero response (the problem is unsupervised)
    int model_type = 10, algorithm_type = 6;
    int Kfold = 1;
    int normalize_type = 0;
    Eigen::VectorXi cv_fold_id = Eigen::VectorXi::Zero(0);
    Eigen::VectorXd weight = Eigen::VectorXd::Ones(n);
    Eigen::VectorXd y_vec = Eigen::VectorXd::Zero(n);

    // one solver object per worker thread
    int algorithm_list_size = max(thread, Kfold);
    vector<Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd> *> algorithm_list_uni_dense(
        algorithm_list_size);
    vector<Algorithm<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double>> *>
        algorithm_list_uni_sparse(algorithm_list_size);

    for (int i = 0; i < algorithm_list_size; i++) {
        if (!sparse_matrix) {
            abessRPCA<Eigen::MatrixXd> *temp = new abessRPCA<Eigen::MatrixXd>(
                algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                is_warm_start, exchange_num, always_select, splicing_type, sub_search);
            temp->r = lambda_seq(0);
            algorithm_list_uni_dense[i] = temp;
        } else {
            abessRPCA<Eigen::SparseMatrix<double>> *temp = new abessRPCA<Eigen::SparseMatrix<double>>(
                algorithm_type, model_type, max_iter, primary_model_fit_max_iter, primary_model_fit_epsilon,
                is_warm_start, exchange_num, always_select, splicing_type, sub_search);
            temp->r = lambda_seq(0);
            algorithm_list_uni_sparse[i] = temp;
        }
    }

    // parameter list
    Parameters parameters(sequence, lambda_seq, s_min, s_max);

    List out_result;
    if (!sparse_matrix) {
        out_result = abessWorkflow<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd>(
            x, y_vec, n, p, normalize_type, weight, algorithm_type, path_type, is_warm_start, ic_type, ic_coef, Kfold,
            parameters, screening_size, g_index, early_stop, thread, sparse_matrix, cv_fold_id, A_init,
            algorithm_list_uni_dense);
    } else {
        // rebuild the sparse design matrix from the (value, row, col) triplets
        Eigen::SparseMatrix<double> sparse_x(n, p);

        // std::vector<triplet> tripletList;
        // tripletList.reserve(x.rows());
        // for (int i = 0; i < x.rows(); i++)
        // {
        //   tripletList.push_back(triplet(int(x(i, 1)), int(x(i, 2)), x(i, 0)));
        // }
        // sparse_x.setFromTriplets(tripletList.begin(), tripletList.end());

        sparse_x.reserve(x.rows());
        for (int i = 0; i < x.rows(); i++) {
            sparse_x.insert(int(x(i, 1)), int(x(i, 2))) = x(i, 0);
        }
        sparse_x.makeCompressed();

        out_result = abessWorkflow<Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double>>(
            sparse_x, y_vec, n, p, normalize_type, weight, algorithm_type, path_type, is_warm_start, ic_type, ic_coef,
            Kfold, parameters, screening_size, g_index, early_stop, thread, sparse_matrix, cv_fold_id, A_init,
            algorithm_list_uni_sparse);
    }

    // unpopulated lists hold nullptr, for which delete is a no-op
    for (int i = 0; i < algorithm_list_size; i++) {
        delete algorithm_list_uni_dense[i];
        delete algorithm_list_uni_sparse[i];
    }
    return out_result;
}
| 26,560 | 46.857658 | 120 | cpp |
abess | abess-master/src/api.h | /*****************************************************************************
* OpenST Basic tool library *
* Copyright (C) 2021 Kangkang Jiang jiangkk3@mail2.sysu.edu.cn *
* *
* This file is part of OST. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License version 3 as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License *
* along with OST. If not, see <http://www.gnu.org/licenses/>. *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
* @file abess.h *
* @brief The main function of abess fremework *
* *
* *
* @author Kangkang Jiang *
* @email jiangkk3@mail2.sysu.edu.cn *
* @version 0.0.1 *
* @date 2021-07-31 *
* @license GNU General Public License (GPL) *
* *
*----------------------------------------------------------------------------*
* Remark : Description *
*----------------------------------------------------------------------------*
* Change History : *
* <Date> | <Version> | <Author> | <Description> *
*----------------------------------------------------------------------------*
* 2021/07/31 | 0.0.1 | Kangkang Jiang | First version *
*----------------------------------------------------------------------------*
* *
*****************************************************************************/
#ifndef SRC_API_H
#define SRC_API_H
#ifdef R_BUILD
#include <Rcpp.h>
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]]
using namespace Rcpp;
#else
#include <Eigen/Eigen>
#include "List.h"
#endif
#include <iostream>
/**
* @brief The main function of abess fremework
* @param X Training data.
* @param y Target values. Will be cast to X's dtype if necessary.
* For linear regression problem, y should be a $n \time 1$ numpy array with type
* \code{double}. For classification problem, \code{y} should be a $n \time 1$ numpy array with values \code{0} or
* \code{1}. For count data, \code{y} should be a $n \time 1$ numpy array of non-negative integer. Note that, for
* multivariate problem, \code{y} can also be a matrix shaped $n \time M$, where $M > 1$.
* @param n Sample size.
* @param p Variable dimension.
* @param weight Individual weights for each sample.
* @param sigma Sample covariance matrix. For PCA problem under Kfold=1, it should be given as
* input, instead of X.
* @param normalize_type Type of normalization on X before fitting the algorithm. If normalize_type=0,
* normalization will not be used.
* @param algorithm_type Algorithm type.
* @param model_type Model type.
* @param max_iter Maximum number of iterations taken for the splicing algorithm to converge.
* Due to the limitation of loss reduction, the splicing algorithm must be able to
* converge. The number of iterations is only to simplify the implementation.
* @param exchange_num Max exchange variable num for the splicing algorithm.
* @param path_type The method to be used to select the optimal support size.
* For path_type = 1, we solve the best subset selection problem for each size in
* support_size. For path_type = 2, we solve the best subset selection problem with support size ranged in (s_min,
* s_max), where the specific support size to be considered is determined by golden section.
* @param is_warm_start When tuning the optimal parameter combination, whether to use the last solution
* as a warm start to accelerate the iterative convergence of the splicing algorithm.
* @param ic_type The type of criterion for choosing the support size.
* @param Kfold The folds number to use the Cross-validation method. If Kfold=1,
* Cross-validation will not be used.
* @param sequence An integer vector representing the alternative support sizes. Only used for
* path_type = 1.
* @param s_min The lower bound of golden-section-search for sparsity searching. Only used for
* path_type = 2.
* @param s_max The higher bound of golden-section-search for sparsity searching. Only used for
* path_type = 2.
* @param thread Max number of multithreads. If thread = 0, the program will use the maximum
* number supported by the device.
* @param screening_size Screen the variables first and use the chosen variables in abess process.
* The number of variables remaining after screening. It should be an integer
* smaller than p. If screen_size = -1, screening will not be used.
* @param g_index The group index for each variable.
* @param always_select An array contains the indexes of variables we want to consider in the model.
* @param primary_model_fit_max_iter The maximal number of iteration in `primary_model_fit()` (in Algorithm.h).
* @param primary_model_fit_epsilon The epsilon (threshold) of iteration in `primary_model_fit()` (in Algorithm.h).
* @param splicing_type The type of splicing in `fit()` (in Algorithm.h).
* "0" for decreasing by half, "1" for decresing by one.
* @param sub_search The number of inactive sets that are split when splicing. It should be a
* positive integer.
* @return result list.
*/
// Entry point for best-subset selection in the GLM family (linear, logistic,
// Poisson, Cox, multinomial, ...; selected via `model_type`). Parameters are
// documented in the comment block above.
List abessGLM_API(Eigen::MatrixXd x, Eigen::MatrixXd y, int n, int p, int normalize_type, Eigen::VectorXd weight,
                  int algorithm_type, int model_type, int max_iter, int exchange_num, int path_type, bool is_warm_start,
                  int ic_type, double ic_coef, int Kfold, Eigen::VectorXi sequence, Eigen::VectorXd lambda_seq,
                  int s_min, int s_max, double lambda_min, double lambda_max, int nlambda, int screening_size,
                  Eigen::VectorXi g_index, Eigen::VectorXi always_select, int primary_model_fit_max_iter,
                  double primary_model_fit_epsilon, bool early_stop, bool approximate_Newton, int thread,
                  bool covariance_update, bool sparse_matrix, int splicing_type, int sub_search,
                  Eigen::VectorXi cv_fold_id, Eigen::VectorXi A_init, bool fit_intercept);
// Entry point for sparse principal component analysis: selects `pca_num`
// sparse loading vectors. Accepts either raw data `x` or a covariance-type
// matrix `sigma`; shared tuning parameters have the same meaning as in
// abessGLM_API above.
List abessPCA_API(Eigen::MatrixXd x, int n, int p, int normalize_type, Eigen::VectorXd weight, Eigen::MatrixXd sigma,
                  int max_iter, int exchange_num, int path_type, bool is_warm_start, int ic_type, double ic_coef,
                  int Kfold, Eigen::MatrixXi sequence, int s_min, int s_max, int screening_size,
                  Eigen::VectorXi g_index, Eigen::VectorXi always_select, bool early_stop, int thread,
                  bool sparse_matrix, int splicing_type, int sub_search, Eigen::VectorXi cv_fold_id, int pca_num,
                  Eigen::VectorXi A_init);
// Entry point for robust principal component analysis (decompose x into a
// low-rank part plus a sparse part). Here `lambda_seq` carries the candidate
// ranks of the low-rank component L (see inline comment); the remaining
// tuning parameters match abessGLM_API above.
List abessRPCA_API(Eigen::MatrixXd x, int n, int p, int max_iter, int exchange_num, int path_type, bool is_warm_start,
                   int ic_type, double ic_coef, Eigen::VectorXi sequence,
                   Eigen::VectorXd lambda_seq,  // rank of L
                   int s_min, int s_max, double lambda_min, double lambda_max, int nlambda, int screening_size,
                   int primary_model_fit_max_iter, double primary_model_fit_epsilon, Eigen::VectorXi g_index,
                   Eigen::VectorXi always_select, bool early_stop, int thread, bool sparse_matrix, int splicing_type,
                   int sub_search, Eigen::VectorXi A_init);
#endif // SRC_API_H
| 9,617 | 71.315789 | 120 | h |
abess | abess-master/src/normalize.cpp | //
// Created by Kangkang Jiang on 2020/3/8.
//
// #define R_BUILD
#ifdef R_BUILD
#include <Rcpp.h>
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]]
using namespace Rcpp;
#else
#include <Eigen/Eigen>
#endif
#include <exception>
#include <iostream>
#include <stdexcept>  // std::overflow_error (previously only available transitively)
#include <string>
using namespace std;
// Abort normalization because column `i` of the design matrix is constant
// (zero weighted variance), which would lead to a division by zero / NaNs.
// Under R_BUILD the reported index is shifted to R's 1-based convention.
// Throws: std::overflow_error carrying the offending (possibly shifted) index.
void constant_warning_ith_variable(int i) {
#ifdef R_BUILD
    i++;  // R users expect 1-based column indices
#endif
    throw std::overflow_error("The variable " + std::to_string(i) + " is constant. " +
                              "Please drop this variable or disable the normalization.");
}
// Weighted standardization for regression data with a univariate response.
// Each column of X and the response y are centered by their weighted means,
// and every column of X is rescaled so its weighted l2-norm equals sqrt(n).
// Outputs: meanx (per-column means of X), meany (mean of y), normx (weighted
// norms of the centered columns). Throws via constant_warning_ith_variable()
// if some column has zero weighted variance; no column is rescaled in that case.
void Normalize(Eigen::MatrixXd &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &meanx, double &meany,
               Eigen::VectorXd &normx) {
    int n = X.rows();
    int p = X.cols();
    // Center every column of X by its weighted mean.
    for (int j = 0; j < p; j++) {
        meanx(j) = weights.dot(X.col(j)) / double(n);
        X.col(j) = X.col(j).array() - meanx(j);
    }
    // Center the response.
    meany = y.dot(weights) / double(n);
    y = y.array() - meany;
    // Weighted norms of the centered columns; guard against constant columns
    // before touching the scale of any column.
    for (int j = 0; j < p; j++) {
        Eigen::VectorXd sq = X.col(j).array().square();
        normx(j) = std::sqrt(weights.dot(sq));
        if (normx(j) == 0) {
            constant_warning_ith_variable(j);
        }
    }
    // Rescale so each column has weighted norm sqrt(n).
    for (int j = 0; j < p; j++) {
        X.col(j) = std::sqrt(double(n)) * X.col(j) / normx(j);
    }
}
// Weighted standardization for data with a multivariate response: centers the
// columns of X and the columns of y by their weighted means and rescales each
// column of X to weighted l2-norm sqrt(n). Outputs: meanx, meany (M-vector of
// response means), normx. Throws via constant_warning_ith_variable() when a
// column of X has zero weighted variance.
void Normalize(Eigen::MatrixXd &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &meanx,
               Eigen::VectorXd &meany, Eigen::VectorXd &normx) {
    int n = X.rows();
    int p = X.cols();
    Eigen::VectorXd tmp(n);
    // Weighted column means of X.
    for (int i = 0; i < p; i++) {
        meanx(i) = weights.dot(X.col(i)) / double(n);
    }
    // Weighted column means of y: (M x n) * (n x 1) -> M-vector.
    meany = y.transpose() * weights / double(n);
    for (int i = 0; i < p; i++) {
        X.col(i) = X.col(i).array() - meanx(i);
    }
    // NOTE(review): y.row(i) is 1 x M while meany is M x 1; this subtraction
    // mixes orientations, which Eigen dimension-checks at runtime for M > 1 --
    // confirm the intended call sites (e.g. whether M == 1 here) or transpose.
    for (int i = 0; i < n; i++) {
        y.row(i) = y.row(i) - meany;
    }
    // y = y.array() - meany;
    // Weighted norms of the centered columns; reject constant columns.
    for (int i = 0; i < p; i++) {
        tmp = X.col(i);
        tmp = tmp.array().square();
        normx(i) = sqrt(weights.dot(tmp));
        if (normx(i) == 0) {
            constant_warning_ith_variable(i);
        }
    }
    // Rescale so each column has weighted norm sqrt(n).
    for (int i = 0; i < p; i++) {
        X.col(i) = sqrt(double(n)) * X.col(i) / normx(i);
    }
}
// Center each column of X by its weighted mean and rescale it to weighted
// l2-norm sqrt(n). No response variable is adjusted here. Outputs: meanx,
// normx. Throws via constant_warning_ith_variable() for a constant column,
// before any column has been rescaled.
void Normalize3(Eigen::MatrixXd &X, Eigen::VectorXd &weights, Eigen::VectorXd &meanx, Eigen::VectorXd &normx) {
    int n = X.rows();
    int p = X.cols();
    // Center every column by its weighted mean.
    for (int j = 0; j < p; j++) {
        meanx(j) = weights.dot(X.col(j)) / double(n);
        X.col(j) = X.col(j).array() - meanx(j);
    }
    // Weighted norms first (so a constant column aborts before any scaling).
    for (int j = 0; j < p; j++) {
        Eigen::VectorXd sq = X.col(j).array().square();
        normx(j) = std::sqrt(weights.dot(sq));
        if (normx(j) == 0) {
            constant_warning_ith_variable(j);
        }
    }
    // Rescale each column to weighted norm sqrt(n).
    for (int j = 0; j < p; j++) {
        X.col(j) = std::sqrt(double(n)) * X.col(j) / normx(j);
    }
}
// Rescale each column of X to weighted l2-norm sqrt(n) WITHOUT centering.
// Outputs: normx. Throws via constant_warning_ith_variable() for an all-zero
// column, before any column has been rescaled.
void Normalize4(Eigen::MatrixXd &X, Eigen::VectorXd &weights, Eigen::VectorXd &normx) {
    int n = X.rows();
    int p = X.cols();
    // Weighted norms first, so a degenerate column aborts before any scaling.
    for (int j = 0; j < p; j++) {
        Eigen::VectorXd sq = X.col(j).array().square();
        normx(j) = std::sqrt(weights.dot(sq));
        if (normx(j) == 0) {
            constant_warning_ith_variable(j);
        }
    }
    for (int j = 0; j < p; j++) {
        X.col(j) = std::sqrt(double(n)) * X.col(j) / normx(j);
    }
}
// Sparse overloads: normalization is a no-op for sparse X — presumably because
// centering would densify the matrix. All output arguments (meanx / meany /
// normx) are left untouched; callers must not rely on them being filled.
void Normalize(Eigen::SparseMatrix<double> &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &meanx,
               double &meany, Eigen::VectorXd &normx) {
    return;
}
void Normalize(Eigen::SparseMatrix<double> &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &meanx,
               Eigen::VectorXd &meany, Eigen::VectorXd &normx) {
    return;
}
void Normalize3(Eigen::SparseMatrix<double> &X, Eigen::VectorXd &weights, Eigen::VectorXd &meanx,
                Eigen::VectorXd &normx) {
    return;
}
void Normalize4(Eigen::SparseMatrix<double> &X, Eigen::VectorXd &weights, Eigen::VectorXd &normx) { return; }
| 4,467 | 30.027778 | 119 | cpp |
abess | abess-master/src/normalize.h | //
// Created by Jin Zhu on 2020/3/8.
//
// #define R_BUILD
#ifndef SRC_NORMALIZE_H
#define SRC_NORMALIZE_H
#ifdef R_BUILD
#include <RcppEigen.h>
#else
#include <Eigen/Eigen>
#endif
// Throw std::overflow_error reporting that variable i is constant (zero
// weighted variance), which would break normalization.
void constant_warning_ith_variable(int i);
// Dense X, univariate y: center X and y by weighted means, scale columns of X
// to weighted norm sqrt(n); fills meanx / meany / normx.
void Normalize(Eigen::MatrixXd &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &meanx, double &meany,
               Eigen::VectorXd &normx);
// Dense X, multivariate y: as above with an M-vector of response means.
void Normalize(Eigen::MatrixXd &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &meanx,
               Eigen::VectorXd &meany, Eigen::VectorXd &normx);
// Center and scale X only (no response adjusted).
void Normalize3(Eigen::MatrixXd &X, Eigen::VectorXd &weights, Eigen::VectorXd &meanx, Eigen::VectorXd &normx);
// Scale X only, without centering.
void Normalize4(Eigen::MatrixXd &X, Eigen::VectorXd &weights, Eigen::VectorXd &normx);
// Sparse overloads are no-ops (see normalize.cpp); outputs stay untouched.
void Normalize(Eigen::SparseMatrix<double> &X, Eigen::VectorXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &meanx,
               double &meany, Eigen::VectorXd &normx);
void Normalize(Eigen::SparseMatrix<double> &X, Eigen::MatrixXd &y, Eigen::VectorXd &weights, Eigen::VectorXd &meanx,
               Eigen::VectorXd &meany, Eigen::VectorXd &normx);
void Normalize3(Eigen::SparseMatrix<double> &X, Eigen::VectorXd &weights, Eigen::VectorXd &meanx,
                Eigen::VectorXd &normx);
void Normalize4(Eigen::SparseMatrix<double> &X, Eigen::VectorXd &weights, Eigen::VectorXd &normx);
#endif // SRC_NORMALIZE_H
| 1,375 | 42 | 119 | h |
abess | abess-master/src/path.h | //
// Created by Jin Zhu on 2020/3/8.
//
#ifndef SRC_PATH_H
#define SRC_PATH_H
#ifdef R_BUILD
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]s]
using namespace Eigen;
#else
#include <Eigen/Eigen>
#include "List.h"
#endif
#include <vector>
#include "Algorithm.h"
#include "Data.h"
#include "Metric.h"
#include "utilities.h"
// Fit the model along the user-supplied path of (support_size, lambda) pairs
// in parameters.sequence, warm-starting every step from the previous solution,
// and record per-step coefficients plus an evaluation score into `result`.
// When metric->is_cv is set, fold `k` is fit on its training split and scored
// by test loss; otherwise the whole data set is fit and scored by an
// information criterion. `early_stop` is currently unused (see the
// commented-out block near the end).
template <class T1, class T2, class T3, class T4>
void sequential_path_cv(Data<T1, T2, T3, T4> &data, Algorithm<T1, T2, T3, T4> *algorithm,
                        Metric<T1, T2, T3, T4> *metric, Parameters &parameters, bool early_stop, int k,
                        Eigen::VectorXi &A_init, Result<T2, T3> &result) {
    int beta_size = algorithm->get_beta_size(data.n, data.p);
    int p = data.p;
    int N = data.g_num;
    int M = data.M;
    Eigen::VectorXi g_index = data.g_index;
    Eigen::VectorXi g_size = data.g_size;
    int sequence_size = (parameters.sequence).size();
    // int early_stop_s = sequence_size;
    Eigen::VectorXi train_mask, test_mask;
    T1 train_y, test_y;
    Eigen::VectorXd train_weight, test_weight;
    T4 train_x, test_x;
    int train_n = 0, test_n = 0;
    // train & test data: without CV the full data is the training set; with CV
    // the fold masks from the metric carve out train/test splits.
    if (!metric->is_cv) {
        train_x = data.x;
        train_y = data.y;
        train_weight = data.weight;
        train_n = data.n;
    } else {
        train_mask = metric->train_mask_list[k];
        test_mask = metric->test_mask_list[k];
        slice(data.x, train_mask, train_x);
        slice(data.x, test_mask, test_x);
        slice(data.y, train_mask, train_y);
        slice(data.y, test_mask, test_y);
        slice(data.weight, train_mask, train_weight);
        slice(data.weight, test_mask, test_weight);
        train_n = train_mask.size();
        test_n = test_mask.size();
    }
    // One row per path step; filled as the path is walked.
    Eigen::Matrix<T2, Dynamic, 1> beta_matrix(sequence_size, 1);
    Eigen::Matrix<T3, Dynamic, 1> coef0_matrix(sequence_size, 1);
    Eigen::MatrixXd train_loss_matrix(sequence_size, 1);
    Eigen::MatrixXd ic_matrix(sequence_size, 1);
    Eigen::MatrixXd test_loss_matrix(sequence_size, 1);
    Eigen::Matrix<VectorXd, Dynamic, 1> bd_matrix(sequence_size, 1);
    Eigen::MatrixXd effective_number_matrix(sequence_size, 1);
    T2 beta_init;
    T3 coef0_init;
    coef_set_zero(beta_size, M, beta_init, coef0_init);
    Eigen::VectorXd bd_init;
    // Walk the path. Statement order matters: the warm-start state
    // (beta_init / coef0_init / bd_init) is carried from one step to the next.
    for (int ind = 0; ind < sequence_size; ind++) {
        algorithm->update_sparsity_level(parameters.sequence(ind).support_size);
        algorithm->update_lambda_level(parameters.sequence(ind).lambda);
        algorithm->update_beta_init(beta_init);
        algorithm->update_bd_init(bd_init);
        algorithm->update_coef0_init(coef0_init);
        algorithm->update_A_init(A_init, N);
        algorithm->fit(train_x, train_y, train_weight, g_index, g_size, train_n, p, N);
        if (algorithm->warm_start) {
            beta_init = algorithm->get_beta();
            coef0_init = algorithm->get_coef0();
            bd_init = algorithm->get_bd();
        }
        // evaluate the beta
        if (metric->is_cv) {
            test_loss_matrix(ind) =
                metric->test_loss(test_x, test_y, test_weight, g_index, g_size, test_n, p, N, algorithm);
        } else {
            ic_matrix(ind) = metric->ic(train_n, M, N, algorithm);
        }
        // save for best_model fit
        beta_matrix(ind) = algorithm->get_beta();
        coef0_matrix(ind) = algorithm->get_coef0();
        train_loss_matrix(ind) = algorithm->get_train_loss();
        bd_matrix(ind) = algorithm->get_bd();
        effective_number_matrix(ind) = algorithm->get_effective_number();
    }
    // To be ensured
    // if (early_stop && lambda_size <= 1 && i >= 3)
    // {
    //     bool condition1 = ic_sequence(i, 0) > ic_sequence(i - 1, 0);
    //     bool condition2 = ic_sequence(i - 1, 0) > ic_sequence(i - 2, 0);
    //     bool condition3 = ic_sequence(i - 2, 0) > ic_sequence(i - 3, 0);
    //     if (condition1 && condition2 && condition3)
    //     {
    //         early_stop_s = i + 1;
    //         break;
    //     }
    // }
    // if (early_stop)
    // {
    //     ic_sequence = ic_sequence.block(0, 0, early_stop_s, lambda_size).eval();
    // }
    result.beta_matrix = beta_matrix;
    result.coef0_matrix = coef0_matrix;
    result.train_loss_matrix = train_loss_matrix;
    result.bd_matrix = bd_matrix;
    result.ic_matrix = ic_matrix;
    result.test_loss_matrix = test_loss_matrix;
    result.effective_number_matrix = effective_number_matrix;
}
// Golden-section search for the best support size in [s_min, s_max], using
// only the first lambda of parameters.lambda_list. The bracket is narrowed
// toward the golden point with the smaller mean loss (CV test loss, or IC when
// not cross-validating); once the interior points meet, the remaining
// [s_min, s_max] interval is swept linearly and only improving sizes are kept.
// Every evaluated support size is appended to support_size_list, and per-fold
// results are written into result_list. Finally parameters.sequence is rebuilt
// from the visited sizes so downstream code can index results consistently.
template <class T1, class T2, class T3, class T4>
void gs_path(Data<T1, T2, T3, T4> &data, vector<Algorithm<T1, T2, T3, T4> *> algorithm_list,
             Metric<T1, T2, T3, T4> *metric, Parameters &parameters, Eigen::VectorXi &A_init,
             vector<Result<T2, T3>> &result_list) {
    int s_min = parameters.s_min;
    int s_max = parameters.s_max;
    // Upper bound on how many distinct sizes can be recorded along the search.
    int sequence_size = s_max - s_min + 5;
    Eigen::VectorXi support_size_list = Eigen::VectorXi::Zero(sequence_size);
    // init: store for each fold
    int Kfold = metric->Kfold;
    vector<Eigen::Matrix<T2, -1, -1>> beta_matrix(Kfold);
    vector<Eigen::Matrix<T3, -1, -1>> coef0_matrix(Kfold);
    vector<Eigen::MatrixXd> train_loss_matrix(Kfold);
    vector<Eigen::MatrixXd> ic_matrix(Kfold);
    vector<Eigen::MatrixXd> test_loss_matrix(Kfold);
    vector<Eigen::Matrix<VectorXd, -1, -1>> bd_matrix(Kfold);
    vector<Eigen::MatrixXd> effective_number_matrix(Kfold);
    for (int k = 0; k < Kfold; k++) {
        beta_matrix[k].resize(sequence_size, 1);
        coef0_matrix[k].resize(sequence_size, 1);
        train_loss_matrix[k].resize(sequence_size, 1);
        ic_matrix[k].resize(sequence_size, 1);
        test_loss_matrix[k].resize(sequence_size, 1);
        bd_matrix[k].resize(sequence_size, 1);
        effective_number_matrix[k].resize(sequence_size, 1);
    }
    T2 beta_init;
    T3 coef0_init;
    int beta_size = algorithm_list[0]->get_beta_size(data.n, data.p);
    coef_set_zero(beta_size, data.M, beta_init, coef0_init);
    Eigen::VectorXd bd_init;
    // gs only support the first lambda
    FIT_ARG<T2, T3> fit_arg(0, parameters.lambda_list(0), beta_init, coef0_init, bd_init, A_init);
    // `ind` indexes the next free slot in the recording matrices (pre-increment
    // before each write, so it ends at the last used slot).
    int ind = -1;
    // Interior golden-ratio points of the current bracket.
    int left = round(0.618 * s_min + 0.382 * s_max);
    int right = round(0.382 * s_min + 0.618 * s_max);
    bool fit_l = true, fit_r = (left != right);
    double loss_l = 0, loss_r = 0;
    while (true) {
        // cout<<" ==> gs: "<<s_min<<" - "<<s_max<<endl;
        // Evaluate the left golden point if it moved since last iteration.
        if (fit_l) {
            fit_l = false;
            fit_arg.support_size = left;
            Eigen::VectorXd loss_list = metric->fit_and_evaluate_in_metric(algorithm_list, data, fit_arg);
            loss_l = loss_list.mean();
            // record: left
            support_size_list(++ind) = left;
            for (int k = 0; k < Kfold; k++) {
                beta_matrix[k](ind) = algorithm_list[k]->beta;
                coef0_matrix[k](ind) = algorithm_list[k]->coef0;
                train_loss_matrix[k](ind) = algorithm_list[k]->get_train_loss();
                bd_matrix[k](ind) = algorithm_list[k]->bd;
                effective_number_matrix[k](ind) = algorithm_list[k]->get_effective_number();
                if (metric->is_cv)
                    test_loss_matrix[k](ind) = loss_list(k);
                else
                    ic_matrix[k](ind) = loss_list(k);
            }
        }
        // Evaluate the right golden point if it moved since last iteration.
        if (fit_r) {
            fit_r = false;
            fit_arg.support_size = right;
            Eigen::VectorXd loss_list = metric->fit_and_evaluate_in_metric(algorithm_list, data, fit_arg);
            loss_r = loss_list.mean();
            // record: pos 2
            support_size_list(++ind) = right;
            for (int k = 0; k < Kfold; k++) {
                beta_matrix[k](ind) = algorithm_list[k]->beta;
                coef0_matrix[k](ind) = algorithm_list[k]->coef0;
                train_loss_matrix[k](ind) = algorithm_list[k]->get_train_loss();
                bd_matrix[k](ind) = algorithm_list[k]->bd;
                effective_number_matrix[k](ind) = algorithm_list[k]->get_effective_number();
                if (metric->is_cv)
                    test_loss_matrix[k](ind) = loss_list(k);
                else
                    ic_matrix[k](ind) = loss_list(k);
            }
        }
        // update split point: shrink the bracket toward the smaller loss and
        // reuse the surviving interior point (classic golden-section step).
        if (loss_l < loss_r) {
            s_max = right;
            right = left;
            loss_r = loss_l;
            left = round(0.618 * s_min + 0.382 * s_max);
            fit_l = true;
        } else {
            s_min = left;
            left = right;
            loss_l = loss_r;
            right = round(0.382 * s_min + 0.618 * s_max);
            fit_r = true;
        }
        if (left == right) break;
    }
    // cout<<"left==right | s_min = "<<s_min<<" | s_max = "<<s_max<<endl;
    T2 best_beta;
    // T3 best_coef0;
    // double best_train_loss = 0;
    // Final linear sweep over the collapsed bracket: only sizes that improve
    // on the best loss so far are recorded.
    double best_loss = DBL_MAX;
    for (int s = s_min; s <= s_max; s++) {
        fit_arg.support_size = s;
        fit_arg.beta_init = beta_init;
        fit_arg.coef0_init = coef0_init;
        fit_arg.bd_init = bd_init;
        Eigen::VectorXd loss_list = metric->fit_and_evaluate_in_metric(algorithm_list, data, fit_arg);
        double loss = loss_list.mean();
        if (loss < best_loss) {
            // record
            support_size_list(++ind) = s;
            best_loss = loss;
            for (int k = 0; k < Kfold; k++) {
                beta_matrix[k](ind) = algorithm_list[k]->beta;
                coef0_matrix[k](ind) = algorithm_list[k]->coef0;
                train_loss_matrix[k](ind) = algorithm_list[k]->get_train_loss();
                bd_matrix[k](ind) = algorithm_list[k]->bd;
                effective_number_matrix[k](ind) = algorithm_list[k]->get_effective_number();
                if (metric->is_cv)
                    test_loss_matrix[k](ind) = loss_list(k);
                else
                    ic_matrix[k](ind) = loss_list(k);
            }
        }
    }
    // `ind` becomes the number of recorded steps; trim the matrices to it.
    ind++;
    for (int k = 0; k < Kfold; k++) {
        result_list[k].beta_matrix = beta_matrix[k].block(0, 0, ind, 1);
        result_list[k].coef0_matrix = coef0_matrix[k].block(0, 0, ind, 1);
        result_list[k].train_loss_matrix = train_loss_matrix[k].block(0, 0, ind, 1);
        result_list[k].bd_matrix = bd_matrix[k].block(0, 0, ind, 1);
        result_list[k].ic_matrix = ic_matrix[k].block(0, 0, ind, 1);
        result_list[k].test_loss_matrix = test_loss_matrix[k].block(0, 0, ind, 1);
        result_list[k].effective_number_matrix = effective_number_matrix[k].block(0, 0, ind, 1);
    }
    // build sequence for gs
    parameters.support_size_list = support_size_list.head(ind).eval();
    parameters.lambda_list = parameters.lambda_list.head(1).eval();
    parameters.build_sequence();
}
// double det(double a[], double b[]);
// calculate the intersection of two lines
// if parallal, need_flag = false.
// void line_intersection(double line1[2][2], double line2[2][2], double intersection[], bool &need_flag);
// boundary: s=smin, s=max, lambda=lambda_min, lambda_max
// line: crosses p and is parallal to u
// calculate the intersections between boundary and line
// void cal_intersections(double p[], double u[], int s_min, int s_max, double lambda_min, double lambda_max, int a[],
// int b[]);
// template <class T1, class T2, class T3>
// void golden_section_search(Data<T1, T2, T3> &data, Algorithm<T1, T2, T3> *algorithm, Metric<T1, T2, T3> *metric,
// double p[], double u[], int s_min, int s_max, double log_lambda_min, double log_lambda_max, double best_arg[],
// T2 &beta1, T3 &coef01, double &train_loss1, double &ic1, Eigen::MatrixXd &ic_sequence);
// template <class T1, class T2, class T3>
// void seq_search(Data<T1, T2, T3> &data, Algorithm<T1, T2, T3> *algorithm, Metric<T1, T2, T3> *metric, double p[],
// double u[], int s_min, int s_max, double log_lambda_min, double log_lambda_max, double best_arg[],
// T2 &beta1, T3 &coef01, double &train_loss1, double &ic1, int nlambda, Eigen::MatrixXd &ic_sequence);
// List pgs_path(Data &data, Algorithm *algorithm, Metric *metric, int s_min, int s_max, double log_lambda_min, double
// log_lambda_max, int powell_path, int nlambda);
#endif // SRC_PATH_H
| 12,293 | 38.277955 | 119 | h |
abess | abess-master/src/screening.h | #ifndef SRC_SCREENING_H
#define SRC_SCREENING_H
// #define R_BUILD
#ifdef R_BUILD
#include <Rcpp.h>
#include <RcppEigen.h>
using namespace Rcpp;
// [[Rcpp::depends(RcppEigen)]]
#else
#include <Eigen/Eigen>
#endif
#include <algorithm>
#include <cfloat>
#include <iostream>
#include "Data.h"
#include "utilities.h"
using namespace std;
using namespace Eigen;
// Marginal variable screening. Each variable group is fit on its own at full
// group sparsity; groups are ranked by the mean squared coefficient norm and
// only the top `screening_size` groups are kept. `data` is shrunk IN PLACE
// (x, x_mean, x_norm, p, group bookkeeping), `beta_size` is updated, and the
// always_select indices on every algorithm are remapped to the new group
// numbering. Returns the flat column indices that survived screening.
template <class T1, class T2, class T3, class T4>
Eigen::VectorXi screening(Data<T1, T2, T3, T4> &data, std::vector<Algorithm<T1, T2, T3, T4> *> algorithm_list,
                          int screening_size, int &beta_size, double lambda, Eigen::VectorXi &A_init) {
    int n = data.n;
    int M = data.M;
    int g_num = data.g_num;
    Eigen::VectorXi g_size = data.g_size;
    Eigen::VectorXi g_index = data.g_index;
    Eigen::VectorXi always_select = algorithm_list[0]->always_select;
    Eigen::VectorXi screening_A(screening_size);
    Eigen::VectorXd coef_norm = Eigen::VectorXd::Zero(g_num);
    T2 beta_init;
    T3 coef0_init;
    Eigen::VectorXd bd_init;
    // Fit each group alone (support size == group size) and score it by the
    // average squared coefficient magnitude. algorithm_list[0] is reused as a
    // scratch solver for every marginal fit.
    for (int i = 0; i < g_num; i++) {
        int p_tmp = g_size(i);
        T4 x_tmp = data.x.middleCols(g_index(i), p_tmp);
        Eigen::VectorXi g_index_tmp = Eigen::VectorXi::LinSpaced(p_tmp, 0, p_tmp - 1);
        Eigen::VectorXi g_size_tmp = Eigen::VectorXi::Ones(p_tmp);
        coef_set_zero(p_tmp, M, beta_init, coef0_init);
        algorithm_list[0]->update_sparsity_level(p_tmp);
        algorithm_list[0]->update_lambda_level(lambda);
        algorithm_list[0]->update_beta_init(beta_init);
        algorithm_list[0]->update_bd_init(bd_init);
        algorithm_list[0]->update_coef0_init(coef0_init);
        algorithm_list[0]->update_A_init(A_init, p_tmp);
        algorithm_list[0]->fit(x_tmp, data.y, data.weight, g_index_tmp, g_size_tmp, n, p_tmp, p_tmp);
        T2 beta = algorithm_list[0]->beta;
        coef_norm(i) = beta.squaredNorm() / p_tmp;
    }
    // keep always_select in active_set: DBL_MAX scores guarantee selection.
    slice_assignment(coef_norm, always_select, DBL_MAX);
    screening_A = max_k(coef_norm, screening_size);
    // data after screening: rebuild group index/size tables for the survivors.
    Eigen::VectorXi new_g_index(screening_size);
    Eigen::VectorXi new_g_size(screening_size);
    int new_p = 0;
    for (int i = 0; i < screening_size; i++) {
        new_p += g_size(screening_A(i));
        new_g_size(i) = g_size(screening_A(i));
    }
    new_g_index(0) = 0;
    for (int i = 0; i < screening_size - 1; i++) {
        new_g_index(i + 1) = new_g_index(i) + g_size(screening_A(i));
    }
    Eigen::VectorXi screening_A_ind = find_ind(screening_A, g_index, g_size, beta_size, g_num);
    T4 x_A;
    slice(data.x, screening_A_ind, x_A, 1);
    Eigen::VectorXd new_x_mean, new_x_norm;
    slice(data.x_mean, screening_A_ind, new_x_mean);
    slice(data.x_norm, screening_A_ind, new_x_norm);
    // Shrink the data object in place.
    data.x = x_A;
    data.x_mean = new_x_mean;
    data.x_norm = new_x_norm;
    data.p = new_p;
    data.g_num = screening_size;
    data.g_index = new_g_index;
    data.g_size = new_g_size;
    beta_size = algorithm_list[0]->get_beta_size(n, new_p);
    if (always_select.size() != 0) {
        // Remap always_select to the post-screening group numbering.
        // NOTE(review): the scan assumes always_select appears in screening_A
        // in ascending order (both appear to be sorted) -- confirm.
        Eigen::VectorXi new_always_select(always_select.size());
        int j = 0;
        for (int i = 0; i < always_select.size(); i++) {
            while (always_select(i) != screening_A(j)) j++;
            new_always_select(i) = j;
        }
        int algorithm_list_size = algorithm_list.size();
        for (int i = 0; i < algorithm_list_size; i++) {
            algorithm_list[i]->always_select = new_always_select;
        }
    }
    algorithm_list[0]->clear_setting();
    return screening_A_ind;
}
#endif // SRC_SCREENING_H
| 3,639 | 30.37931 | 110 | h |
abess | abess-master/src/utilities.cpp | //
// Created by jiangkangkang on 2020/3/9.
//
#ifndef R_BUILD
#include <Eigen/Eigen>
#include <unsupported/Eigen/MatrixFunctions>
#else
#include <RcppEigen.h>
#endif
#include <string.h>
#include <algorithm>
#include <iostream>
using namespace std;
using namespace Eigen;
// Expand a list of selected group numbers L into the corresponding flat
// coefficient indices, concatenating each group's [gindex, gindex+gsize)
// range. When all N groups are selected the result is simply 0..beta_size-1.
Eigen::VectorXi find_ind(Eigen::VectorXi &L, Eigen::VectorXi &gindex, Eigen::VectorXi &gsize, int beta_size, int N) {
    if (L.size() == N) {
        return Eigen::VectorXi::LinSpaced(beta_size, 0, beta_size - 1);
    }
    Eigen::VectorXi ind = Eigen::VectorXi::Zero(beta_size);
    int total = 0;  // number of flat indices written so far
    for (int k = 0; k < L.size(); k++) {
        int g = L(k);
        ind.segment(total, gsize(g)) =
            Eigen::VectorXi::LinSpaced(gsize(g), gindex(g), gindex(g) + gsize(g) - 1);
        total += gsize(g);
    }
    return ind.head(total).eval();
}
// Block-wise matrix inverse: for each of the N blocks Phi(i,0), solve
// Phi(i,0) * X = I via LDLT and store X in the result.
Eigen::Matrix<Eigen::MatrixXd, -1, -1> invPhi(Eigen::Matrix<Eigen::MatrixXd, -1, -1> &Phi, int N) {
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> invPhi(N, 1);
    for (int i = 0; i < N; i++) {
        int dim = (Phi(i, 0)).rows();
        invPhi(i, 0) = (Phi(i, 0)).ldlt().solve(Eigen::MatrixXd::Identity(dim, dim));
    }
    return invPhi;
}
// Write `value` into nums at every position listed in ind (no-op if ind is empty).
void slice_assignment(Eigen::VectorXd &nums, Eigen::VectorXi &ind, double value) {
    for (int k = 0; k < ind.size(); k++) {
        nums(ind(k)) = value;
    }
}
// Eigen::VectorXd vector_slice(Eigen::VectorXd &nums, Eigen::VectorXi &ind)
// {
// Eigen::VectorXd sub_nums(ind.size());
// if (ind.size() != 0)
// {
// for (int i = 0; i < ind.size(); i++)
// {
// sub_nums(i) = nums(ind(i));
// }
// }
// return sub_nums;
// }
// Gather the entries of nums at the positions listed in ind into a new vector.
Eigen::VectorXi vector_slice(Eigen::VectorXi &nums, Eigen::VectorXi &ind) {
    Eigen::VectorXi picked(ind.size());
    for (int k = 0; k < ind.size(); k++) {
        picked(k) = nums(ind(k));
    }
    return picked;
}
// Eigen::MatrixXd matrix_slice(Eigen::MatrixXd &nums, Eigen::VectorXi &ind, int axis)
// {
// if (axis == 0)
// {
// Eigen::MatrixXd sub_nums(ind.size(), nums.cols());
// if (ind.size() != 0)
// {
// for (int i = 0; i < ind.size(); i++)
// {
// sub_nums.row(i) = nums.row(ind(i));
// }
// }
// return sub_nums;
// }
// else
// {
// Eigen::MatrixXd sub_nums(nums.rows(), ind.size());
// if (ind.size() != 0)
// {
// for (int i = 0; i < ind.size(); i++)
// {
// sub_nums.col(i) = nums.col(ind(i));
// }
// }
// return sub_nums;
// }
// }
// Eigen::MatrixXd row_slice(Eigen::MatrixXd &nums, Eigen::VectorXi &ind)
// {
// Eigen::MatrixXd sub_nums(ind.size(), nums.cols());
// if (ind.size() != 0)
// {
// for (int i = 0; i < ind.size(); i++)
// {
// sub_nums.row(i) = nums.row(ind(i));
// }
// }
// return sub_nums;
// }
// Eigen::VectorXi get_value_index(Eigen::VectorXd &nums, double value)
// {
// Eigen::VectorXi ind(nums.size());
// int cur_index = 0;
// for (int i = 0; i < nums.size(); i++)
// {
// if (nums(i) == value)
// {
// ind(cur_index) = i;
// cur_index += 1;
// }
// }
// return ind.head(cur_index).eval();
// }
// In A, replace each element that also occurs in B with the element of C at
// the same position, then return the result sorted ascending. A is taken by
// value so the caller's copy is untouched.
// O(|B| * |A|) linear scan ("to do": binary search, as in the original note).
Eigen::VectorXi diff_union(Eigen::VectorXi A, Eigen::VectorXi &B, Eigen::VectorXi &C) {
    // Eigen::Index loop counters: Eigen's size() is signed, and the previous
    // `unsigned int` counters caused signed/unsigned comparison warnings.
    for (Eigen::Index i = 0; i < B.size(); i++) {
        for (Eigen::Index k = 0; k < A.size(); k++) {
            if (B(i) == A(k)) {
                A(k) = C(i);  // replace the first match, then move on
                break;
            }
        }
    }
    std::sort(A.data(), A.data() + A.size());
    return A;
}
// Indices of the k smallest entries of vec. With sort_by_value the indices
// come back ordered by ascending value; otherwise by ascending index.
Eigen::VectorXi min_k(Eigen::VectorXd &vec, int k, bool sort_by_value) {
    Eigen::VectorXi ind = Eigen::VectorXi::LinSpaced(vec.size(), 0, vec.size() - 1);  // [0 1 2 3 ... N-1]
    // Capture vec by reference: the previous by-value capture copied the whole
    // vector into the comparator. The comparator never outlives this scope,
    // so the reference is safe.
    auto rule = [&vec](int i, int j) -> bool { return vec(i) < vec(j); };  // sort rule
    std::nth_element(ind.data(), ind.data() + k, ind.data() + ind.size(), rule);
    if (sort_by_value) {
        std::sort(ind.data(), ind.data() + k, rule);
    } else {
        std::sort(ind.data(), ind.data() + k);
    }
    return ind.head(k).eval();
}
// Indices of the k largest entries of vec. With sort_by_value the indices
// come back ordered by descending value; otherwise by ascending index.
Eigen::VectorXi max_k(Eigen::VectorXd &vec, int k, bool sort_by_value) {
    Eigen::VectorXi ind = Eigen::VectorXi::LinSpaced(vec.size(), 0, vec.size() - 1);  // [0 1 2 3 ... N-1]
    // Capture vec by reference: the previous by-value capture copied the whole
    // vector into the comparator. The comparator never outlives this scope,
    // so the reference is safe.
    auto rule = [&vec](int i, int j) -> bool { return vec(i) > vec(j); };  // sort rule
    std::nth_element(ind.data(), ind.data() + k, ind.data() + ind.size(), rule);
    if (sort_by_value) {
        std::sort(ind.data(), ind.data() + k, rule);
    } else {
        std::sort(ind.data(), ind.data() + k);
    }
    return ind.head(k).eval();
}
// Eigen::VectorXi max_k_2(Eigen::VectorXd &vec, int k)
// {
// Eigen::VectorXi ind = Eigen::VectorXi::LinSpaced(vec.size(), 0, vec.size() - 1); //[0 1 2 3 ... N-1]
// auto rule = [vec](int i, int j) -> bool
// {
// return vec(i) > vec(j);
// }; // sort rule
// std::nth_element(ind.data(), ind.data() + k, ind.data() + ind.size(), rule);
// std::sort(ind.data(), ind.data() + k);
// return ind.head(k).eval();
// }
// complement
// Return the complement of the sorted index set A within {0, ..., N-1}.
Eigen::VectorXi complement(Eigen::VectorXi &A, int N) {
    int A_size = A.size();
    if (A_size == 0) {
        return Eigen::VectorXi::LinSpaced(N, 0, N - 1);
    }
    if (A_size == N) {
        Eigen::VectorXi I(0);
        return I;
    }
    Eigen::VectorXi I(N - A_size);
    int pos = 0;  // next write slot in I
    int a = 0;    // cursor into A
    for (int i = 0; i < N; i++) {
        if (a < A_size && i == A(a)) {
            a++;  // i belongs to A: skip it
        } else {
            I(pos++) = i;  // i is outside A: keep it
        }
    }
    return I;
}
// // Ac
// Eigen::VectorXi Ac(Eigen::VectorXi &A, Eigen::VectorXi &U)
// {
// int A_size = A.size();
// int N = U.size();
// if (A_size == 0)
// {
// return U;
// }
// else if (A_size == N)
// {
// Eigen::VectorXi I(0);
// return I;
// }
// else
// {
// Eigen::VectorXi I(N - A_size);
// int cur_index = 0;
// int A_index = 0;
// for (int i = 0; i < N; i++)
// {
// if (A_index < A.size() && U(i) == A(A_index))
// {
// A_index += 1;
// continue;
// }
// else
// {
// I(cur_index) = U(i);
// cur_index += 1;
// }
// }
// return I;
// }
// }
// Gather nums at the positions in ind into A (axis is unused for vectors;
// kept for signature symmetry with the matrix overloads).
void slice(Eigen::VectorXd &nums, Eigen::VectorXi &ind, Eigen::VectorXd &A, int axis) {
    A = Eigen::VectorXd::Zero(ind.size());
    for (int k = 0; k < ind.size(); k++) {
        A(k) = nums(ind(k));
    }
}
// Gather rows (axis == 0) or columns (otherwise) of nums, listed in ind, into A.
void slice(Eigen::MatrixXd &nums, Eigen::VectorXi &ind, Eigen::MatrixXd &A, int axis) {
    if (axis == 0) {
        A = Eigen::MatrixXd::Zero(ind.size(), nums.cols());
        for (int k = 0; k < ind.size(); k++) {
            A.row(k) = nums.row(ind(k));
        }
    } else {
        A = Eigen::MatrixXd::Zero(nums.rows(), ind.size());
        for (int k = 0; k < ind.size(); k++) {
            A.col(k) = nums.col(ind(k));
        }
    }
}
// Sparse slicing: gather rows (axis == 0) or columns (otherwise) of nums,
// listed in ind, into A.
void slice(Eigen::SparseMatrix<double> &nums, Eigen::VectorXi &ind, Eigen::SparseMatrix<double> &A, int axis) {
    if (axis == 0) {
        // Row gathering goes through a RowMajor copy: row-wise assignment on
        // the default ColMajor sparse layout would be inefficient.
        Eigen::SparseMatrix<double, Eigen::RowMajor> nums_row(nums);
        Eigen::SparseMatrix<double, Eigen::RowMajor> A_row(ind.size(), nums.cols());
        A_row.reserve(nums.nonZeros());  // upper bound on result nonzeros
        if (ind.size() != 0) {
            for (int i = 0; i < ind.size(); i++) {
                A_row.row(i) = nums_row.row(ind(i));
            }
        }
        A = A_row;  // convert back to the caller's (ColMajor) layout
    } else {
        A.resize(nums.rows(), ind.size());
        A.reserve(nums.nonZeros());  // upper bound on result nonzeros
        if (ind.size() != 0) {
            for (int i = 0; i < ind.size(); i++) {
                A.col(i) = nums.col(ind(i));
            }
        }
    }
    return;
}
// Scatter A back into nums at the positions in ind; every other entry of
// nums is zeroed. (axis is unused for vectors.)
void slice_restore(Eigen::VectorXd &A, Eigen::VectorXi &ind, Eigen::VectorXd &nums, int axis) {
    nums = Eigen::VectorXd::Zero(nums.size());
    for (int k = 0; k < ind.size(); k++) {
        nums(ind(k)) = A(k);
    }
}
// Scatter the rows (axis == 0) or columns (otherwise) of A back into nums at
// the positions in ind; all other entries of nums are zeroed.
void slice_restore(Eigen::MatrixXd &A, Eigen::VectorXi &ind, Eigen::MatrixXd &nums, int axis) {
    nums = Eigen::MatrixXd::Zero(nums.rows(), nums.cols());
    if (axis == 0) {
        for (int k = 0; k < ind.size(); k++) {
            nums.row(ind(k)) = A.row(k);
        }
    } else {
        for (int k = 0; k < ind.size(); k++) {
            nums.col(ind(k)) = A.col(k);
        }
    }
}
// Reset coefficients for a univariate model: beta becomes a zero p-vector,
// the intercept becomes zero. (M is unused in this overload.)
void coef_set_zero(int p, int M, Eigen::VectorXd &beta, double &coef0) {
    beta.setZero(p);
    coef0 = 0.0;
}
// Reset coefficients for a multivariate model: beta becomes a zero p-by-M
// matrix, the intercept becomes a zero M-vector.
void coef_set_zero(int p, int M, Eigen::MatrixXd &beta, Eigen::VectorXd &coef0) {
    beta.setZero(p, M);
    coef0.setZero(M);
}
// Element-wise product: A is scaled in place by B and also returned by value.
// (axis is unused for the vector overload.)
Eigen::VectorXd array_product(Eigen::VectorXd &A, Eigen::VectorXd &B, int axis) {
    A.array() *= B.array();
    return A;
}
// Scale rows (axis == 0) or columns (otherwise) of A element-wise by B, in
// place; A is also returned by value.
// NOTE(review): in the axis == 0 branch, A.row(i) is 1 x m while B is m x 1;
// the mixed orientation in the array product is dimension-suspect for m > 1 --
// confirm the call sites before relying on that branch.
Eigen::MatrixXd array_product(Eigen::MatrixXd &A, Eigen::VectorXd &B, int axis) {
    if (axis == 0) {
        for (int i = 0; i < A.rows(); i++) {
            A.row(i) = A.row(i).array() * B.array();
        }
    } else {
        for (int i = 0; i < A.cols(); i++) {
            A.col(i) = A.col(i).array() * B.array();
        }
    }
    return A;
}
// Eigen::SparseMatrix<double> array_product(Eigen::SparseMatrix<double> &A, Eigen::VectorXd &B, int axis)
// {
// for (int i = 0; i < A.cols(); i++)
// {
// A.col(i) = A.col(i) * B;
// }
// return A;
// }
// Element-wise quotient, in place: A(i) /= B(i). (axis is unused here.)
void array_quotient(Eigen::VectorXd &A, Eigen::VectorXd &B, int axis) {
    A.array() /= B.array();
}
// Divide rows (axis == 0) or columns (otherwise) of A element-wise by B, in place.
// NOTE(review): in the axis == 0 branch, A.row(i) is 1 x m while B is m x 1;
// the mixed orientation is dimension-suspect for m > 1 (same concern as
// array_product above) -- confirm the call sites.
void array_quotient(Eigen::MatrixXd &A, Eigen::VectorXd &B, int axis) {
    if (axis == 0) {
        for (int i = 0; i < A.rows(); i++) {
            A.row(i) = A.row(i).array() / B.array();
        }
    } else {
        for (int i = 0; i < A.cols(); i++) {
            A.col(i) = A.col(i).array() / B.array();
        }
    }
    return;
}
// Vector overload: ordinary inner product <A, B>.
double matrix_dot(Eigen::VectorXd &A, Eigen::VectorXd &B) { return A.dot(B); }
// Matrix overload: A^T * B, i.e. the inner product of each column of A with B.
Eigen::VectorXd matrix_dot(Eigen::MatrixXd &A, Eigen::VectorXd &B) { return A.transpose() * B; }
// void matrix_sqrt(Eigen::MatrixXd &A, Eigen::MatrixXd &B)
// {
// A.sqrt().evalTo(B);
// }
// void matrix_sqrt(Eigen::SparseMatrix<double> &A, Eigen::MatrixXd &B)
// {
// if (A.rows() == 1)
// {
// B = Eigen::MatrixXd::Ones(1, 1) * A.cwiseSqrt();
// }
// else
// {
// Eigen::SelfAdjointEigenSolver<Eigen::SparseMatrix<double>>
// adjoint_eigen_solver(A);
// // const auto &eigenvalues = adjoint_eigen_solver.eigenvalues();
// // CHECK_GT(eigenvalues.minCoeff(), -1e-5) //R.minCoeff() 意思是 min(R(:))最小值
// // << "MatrixSqrt failed with negative eigenvalues: "
// // << eigenvalues.transpose();
// B = adjoint_eigen_solver.eigenvectors() * (adjoint_eigen_solver.eigenvalues().cwiseSqrt().asDiagonal()) *
// adjoint_eigen_solver.eigenvectors().transpose();
// // .cwiseMax(Eigen::Matrix<FloatType, N, 1>::Zero()) //R.cwiseMax(P)
// // .cwiseSqrt() // R.cwiseSqrt()
// // .asDiagonal() * // x.asDiagonal()
// // adjoint_eigen_solver.eigenvectors().transpose();
// }
// }
// Overwrite column 0 of X with an all-ones (intercept) column.
void add_constant_column(Eigen::MatrixXd &X) {
    X.col(0).setOnes();
}
// Fill column 0 of a sparse X with ones (intercept column).
// NOTE(review): Eigen's SparseMatrix::insert() requires that entry (i, 0) does
// not already exist; presumably callers pass X with an empty first column --
// verify at the call sites.
void add_constant_column(Eigen::SparseMatrix<double> &X) {
    for (int i = 0; i < X.rows(); i++) {
        X.insert(i, 0) = 1.0;
    }
    return;
}
// void set_nonzeros(Eigen::MatrixXd &X, Eigen::MatrixXd &x)
// {
// return;
// }
// void set_nonzeros(Eigen::SparseMatrix<double> &X, Eigen::SparseMatrix<double> &x)
// {
// X.reserve(x.nonZeros() + x.rows());
// }
// void overload_ldlt(Eigen::SparseMatrix<double> &X_new, Eigen::SparseMatrix<double> &X, Eigen::VectorXd &Z,
// Eigen::VectorXd &beta)
// {
// // Eigen::SparseMatrix<double> XTX = X_new.transpose() * X;
// // Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
// // solver.compute(X_new.transpose() * X);
// // beta = solver.solve(X_new.transpose() * Z);
// Eigen::MatrixXd XTX = X_new.transpose() * X;
// beta = (XTX).ldlt().solve(X_new.transpose() * Z);
// return;
// }
// void overload_ldlt(Eigen::SparseMatrix<double> &X_new, Eigen::SparseMatrix<double> &X, Eigen::MatrixXd &Z,
// Eigen::MatrixXd &beta)
// {
// // Eigen::SparseMatrix<double> XTX = X_new.transpose() * X;
// // Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
// // solver.compute(X_new.transpose() * X);
// // beta = solver.solve(X_new.transpose() * Z);
// Eigen::MatrixXd XTX = X_new.transpose() * X;
// beta = (XTX).ldlt().solve(X_new.transpose() * Z);
// return;
// }
// void overload_ldlt(Eigen::MatrixXd &X_new, Eigen::MatrixXd &X, Eigen::VectorXd &Z, Eigen::VectorXd &beta)
// {
// beta = (X_new.transpose() * X).ldlt().solve(X_new.transpose() * Z);
// return;
// }
// void overload_ldlt(Eigen::MatrixXd &X_new, Eigen::MatrixXd &X, Eigen::MatrixXd &Z, Eigen::MatrixXd &beta)
// {
// beta = (X_new.transpose() * X).ldlt().solve(X_new.transpose() * Z);
// return;
// }
// bool check_ill_condition(Eigen::MatrixXd &M){
// Eigen::JacobiSVD<Eigen::MatrixXd> svd(M);
// double l1 = svd.singularValues()(0);
// double l2 = svd.singularValues()(svd.singularValues().size()-1);
// return ((l2 == 0 || l1 / l2 > 1e+10) ? true : false);
// }
// to do
// Folds sample weights into (x, y) by scaling sample i by sqrt(w_i),
// the standard transformation for weighted least squares.
void add_weight(Eigen::MatrixXd &x, Eigen::VectorXd &y, Eigen::VectorXd weights) {
    Eigen::VectorXd w_sqrt = weights.array().sqrt();
    const int n_samples = x.rows();
    for (int r = 0; r < n_samples; r++) {
        x.row(r) *= w_sqrt(r);
    }
    array_product(y, w_sqrt, 1);
}
// Multi-response variant: scales row i of both x and y by sqrt(w_i).
void add_weight(Eigen::MatrixXd &x, Eigen::MatrixXd &y, Eigen::VectorXd weights) {
    Eigen::VectorXd w_sqrt = weights.array().sqrt();
    const int n_samples = x.rows();
    for (int r = 0; r < n_samples; r++) {
        x.row(r) *= w_sqrt(r);
    }
    array_product(y, w_sqrt, 1);
}
// Sparse variant: folds sample weights into (x, y) by scaling sample i by sqrt(w_i).
// Fix: the previous code scaled x by w_i while y (and the dense overloads) used
// sqrt(w_i), which is inconsistent for the weighted least-squares transform.
// Also uses it.valueRef() to update in place instead of two coeffRef lookups.
void add_weight(Eigen::SparseMatrix<double> &x, Eigen::VectorXd &y, Eigen::VectorXd weights) {
    Eigen::VectorXd sqrt_weight = weights.array().sqrt();
    for (int k = 0; k < x.outerSize(); ++k) {
        for (SparseMatrix<double>::InnerIterator it(x, k); it; ++it) {
            it.valueRef() *= sqrt_weight(it.row());
        }
    }
    array_product(y, sqrt_weight, 1);
}
// Sparse multi-response variant: scales sample i of x and y by sqrt(w_i).
// Fix: previously x was scaled by w_i while y used sqrt(w_i) (inconsistent with
// the dense overloads); in-place update via it.valueRef() avoids coeffRef lookups.
void add_weight(Eigen::SparseMatrix<double> &x, Eigen::MatrixXd &y, Eigen::VectorXd weights) {
    Eigen::VectorXd sqrt_weight = weights.array().sqrt();
    for (int k = 0; k < x.outerSize(); ++k) {
        for (SparseMatrix<double>::InnerIterator it(x, k); it; ++it) {
            it.valueRef() *= sqrt_weight(it.row());
        }
    }
    array_product(y, sqrt_weight, 1);
}
| 16,092 | 28.857143 | 117 | cpp |
abess | abess-master/src/utilities.h | //
// Created by jiangkangkang on 2020/3/9.
//
/**
* @file utilities.h
* @brief some utilities for abess package.
*/
#ifndef SRC_UTILITIES_H
#define SRC_UTILITIES_H
#ifndef R_BUILD
#include <Eigen/Eigen>
#include <unsupported/Eigen/MatrixFunctions>
#else
#include <Rcpp.h>
#include <RcppEigen.h>
#endif
#include <cfloat>
#include <iostream>
using namespace std;
using namespace Eigen;
/**
* @brief Save the sequential fitting result along the parameter searching.
* @details All matrix stored here have only one column, and each row correspond to a
* parameter combination in class Parameters.
* @tparam T2 for beta
* @tparam T3 for coef0
*/
template <class T2, class T3>
struct Result {
    Eigen::Matrix<T2, Eigen::Dynamic, Eigen::Dynamic>
        beta_matrix; /*!< Each value is the beta corresponding to a parameter combination */
    Eigen::Matrix<T3, Eigen::Dynamic, Eigen::Dynamic>
        coef0_matrix; /*!< Each value is the coef0 corresponding to a parameter combination */
    Eigen::MatrixXd
        ic_matrix; /*!< Each value is the information criterion value corresponding to a parameter combination */
    Eigen::MatrixXd test_loss_matrix; /*!< Each value is the test loss corresponding to a parameter combination */
    Eigen::MatrixXd train_loss_matrix; /*!< Each value is the train loss corresponding to a parameter combination */
    Eigen::Matrix<Eigen::VectorXd, Eigen::Dynamic, Eigen::Dynamic>
        bd_matrix; /*!< Each value is the sacrifice corresponding to a parameter combination */
    Eigen::MatrixXd
        effective_number_matrix; /*!< Each value is the effective number corresponding to a parameter combination */
};
/**
 * @brief Warm-start arguments for a single model fit.
 * @tparam T2 coefficient type (beta)
 * @tparam T3 intercept type (coef0)
 */
template <class T2, class T3>
struct FIT_ARG {
    int support_size;         // target sparsity level for this fit
    double lambda;            // regularization strength for this fit
    T2 beta_init;             // initial coefficients (warm start)
    T3 coef0_init;            // initial intercept (warm start)
    Eigen::VectorXd bd_init;  // initial sacrifice values (cf. Result::bd_matrix)
    Eigen::VectorXi A_init;   // initial active set
    FIT_ARG(int _support_size, double _lambda, T2 _beta_init, T3 _coef0_init, VectorXd _bd_init, VectorXi _A_init) {
        support_size = _support_size;
        lambda = _lambda;
        beta_init = _beta_init;
        coef0_init = _coef0_init;
        bd_init = _bd_init;
        A_init = _A_init;
    };
    FIT_ARG(){};
};
// One point of the hyper-parameter search grid: a (support_size, lambda) pair.
struct single_parameter {
    int support_size;  // sparsity level
    double lambda;     // regularization strength
    single_parameter() {}
    single_parameter(int support_size, double lambda) : support_size(support_size), lambda(lambda) {}
};
/**
* @brief Parameter list
* @details Store all parameters (e.g. `support_size`, `lambda`), and make a list of their combination.
* So that the algorithm can extract them one by one.
*/
class Parameters {
   public:
    Eigen::VectorXi support_size_list;
    Eigen::VectorXd lambda_list;
    int s_min = 0;
    int s_max = 0;
    Eigen::Matrix<single_parameter, -1, 1> sequence;  // all (support_size, lambda) combinations

    Parameters() {}
    Parameters(Eigen::VectorXi &support_size_list, Eigen::VectorXd &lambda_list, int s_min, int s_max) {
        this->support_size_list = support_size_list;
        this->lambda_list = lambda_list;
        this->s_min = s_min;
        this->s_max = s_max;
        if (support_size_list.size() > 0) {
            // path = "seq"
            this->build_sequence();
        }
    }

    /**
     * @brief Build `sequence` with all combinations of parameters.
     * @details Support sizes are traversed in order while the lambda axis is
     * swept in a zig-zag (alternating) order, so consecutive entries differ in
     * only one coordinate.
     * Fix: the loop start/step were previously computed with pow(-1, i1),
     * i.e. floating-point arithmetic driving an integer loop; use exact
     * integer parity instead.
     */
    void build_sequence() {
        // suppose each input vector has size >= 1
        int ind = 0;
        int size1 = (this->support_size_list).size();
        int size2 = (this->lambda_list).size();
        (this->sequence).resize(size1 * size2, 1);
        for (int i1 = 0; i1 < size1; i1++) {
            // even rows sweep lambdas forward, odd rows sweep backward
            int step = (i1 % 2 == 0) ? 1 : -1;
            int start = (i1 % 2 == 0) ? 0 : size2 - 1;
            for (int i2 = start; i2 >= 0 && i2 < size2; i2 += step) {
                int support_size = this->support_size_list(i1);
                double lambda = this->lambda_list(i2);
                single_parameter temp(support_size, lambda);
                this->sequence(ind++) = temp;
            }
        }
    }

    /// Print every (support_size, lambda) pair; debugging helper.
    void print_sequence() {
#ifdef R_BUILD
        Rcout << "==> Parameter List:" << endl;
#else
        std::cout << "==> Parameter List:" << endl;
#endif
        for (int i = 0; i < (this->sequence).size(); i++) {
            int support_size = (this->sequence(i)).support_size;
            double lambda = (this->sequence(i)).lambda;
#ifdef R_BUILD
            Rcout << " support_size = " << support_size << ", lambda = " << lambda << endl;
#else
            std::cout << " support_size = " << support_size << ", lambda = " << lambda << endl;
#endif
        }
    }
};
/**
* @brief return the indexes of all variables in groups in `L`.
*/
Eigen::VectorXi find_ind(Eigen::VectorXi &L, Eigen::VectorXi &index, Eigen::VectorXi &gsize, int beta_size, int N);
/**
* @brief return part of X, which only contains columns in `ind`.
*/
/**
 * @brief Return the sub-matrix of X containing only the columns listed in `ind`.
 * @details Returns X unchanged when every column is requested, or for
 * model_type 10 / 7 (these models use the full matrix — confirm semantics
 * against the model definitions).
 */
template <class T4>
T4 X_seg(T4 &X, int n, Eigen::VectorXi &ind, int model_type) {
    if (ind.size() == X.cols() || model_type == 10 || model_type == 7) {
        return X;
    }
    T4 sub(n, ind.size());
    for (int j = 0; j < ind.size(); j++) {
        sub.col(j) = X.col(ind(j));
    }
    return sub;
}
// template <class T4>
// void X_seg(T4 &X, int n, Eigen::VectorXi &ind, T4 &X_seg)
// {
// if (ind.size() == X.cols())
// {
// X_seg = X;
// }
// else
// {
// X_seg.resize(n, ind.size());
// for (int k = 0; k < ind.size(); k++)
// {
// X_seg.col(k) = X.col(ind(k));
// }
// }
// };
// Computes one Gram matrix (X_g^T X_g) per variable group.
// `index(g)` is the first column of group g and `gsize(g)` its width.
template <class T4>
Eigen::Matrix<T4, -1, -1> compute_group_XTX(T4 &X, Eigen::VectorXi index, Eigen::VectorXi gsize, int n, int p, int N) {
    Eigen::Matrix<T4, -1, -1> gram(N, 1);
    for (int g = 0; g < N; g++) {
        T4 X_group = X.block(0, index(g), n, gsize(g));
        gram(g, 0) = X_group.transpose() * X_group;
    }
    return gram;
}
// For each group g, computes the matrix square root of
// (2*lambda*I + X_g^T X_g / n), using the precomputed group Gram matrices.
template <class T4>
Eigen::Matrix<Eigen::MatrixXd, -1, -1> Phi(T4 &X, Eigen::VectorXi index, Eigen::VectorXi gsize, int n, int p, int N,
                                           double lambda, Eigen::Matrix<T4, -1, -1> group_XTX) {
    Eigen::Matrix<Eigen::MatrixXd, -1, -1> phi(N, 1);
    for (int g = 0; g < N; g++) {
        Eigen::MatrixXd shifted_gram =
            2 * lambda * Eigen::MatrixXd::Identity(gsize(g), gsize(g)) + group_XTX(g, 0) / double(n);
        shifted_gram.sqrt().evalTo(phi(g, 0));
    }
    return phi;
}
Eigen::Matrix<Eigen::MatrixXd, -1, -1> invPhi(Eigen::Matrix<Eigen::MatrixXd, -1, -1> &Phi, int N);
// void max_k(Eigen::VectorXd &vec, int k, Eigen::VectorXi &result);
void slice_assignment(Eigen::VectorXd &nums, Eigen::VectorXi &ind, double value);
// Eigen::VectorXi get_value_index(Eigen::VectorXd &nums, double value);
// Eigen::VectorXd vector_slice(Eigen::VectorXd &nums, Eigen::VectorXi &ind);
Eigen::VectorXi vector_slice(Eigen::VectorXi &nums, Eigen::VectorXi &ind);
// Eigen::MatrixXd row_slice(Eigen::MatrixXd &nums, Eigen::VectorXi &ind);
// Eigen::MatrixXd matrix_slice(Eigen::MatrixXd &nums, Eigen::VectorXi &ind, int axis);
// Eigen::MatrixXd X_seg(Eigen::MatrixXd &X, int n, Eigen::VectorXi &ind);
/**
* @brief complement of A, the whole set is {0..N-1}
*/
Eigen::VectorXi complement(Eigen::VectorXi &A, int N);
// Eigen::VectorXi Ac(Eigen::VectorXi &A, Eigen::VectorXi &U);
/**
* @brief replace `B` by `C` in `A`
*/
Eigen::VectorXi diff_union(Eigen::VectorXi A, Eigen::VectorXi &B, Eigen::VectorXi &C);
/**
* @brief return the indexes of min `k` values in `nums`.
*/
Eigen::VectorXi min_k(Eigen::VectorXd &nums, int k, bool sort_by_value = false);
/**
* @brief return the indexes of max `k` values in `nums`.
*/
Eigen::VectorXi max_k(Eigen::VectorXd &nums, int k, bool sort_by_value = false);
// Eigen::VectorXi max_k_2(Eigen::VectorXd &vec, int k);
/**
* @brief Extract `nums` at `ind` position, and store in `A`.
*/
void slice(Eigen::VectorXd &nums, Eigen::VectorXi &ind, Eigen::VectorXd &A, int axis = 0);
void slice(Eigen::MatrixXd &nums, Eigen::VectorXi &ind, Eigen::MatrixXd &A, int axis = 0);
void slice(Eigen::SparseMatrix<double> &nums, Eigen::VectorXi &ind, Eigen::SparseMatrix<double> &A, int axis = 0);
/**
* @brief The inverse action of function slice.
*/
void slice_restore(Eigen::VectorXd &A, Eigen::VectorXi &ind, Eigen::VectorXd &nums, int axis = 0);
void slice_restore(Eigen::MatrixXd &A, Eigen::VectorXi &ind, Eigen::MatrixXd &nums, int axis = 0);
void coef_set_zero(int p, int M, Eigen::VectorXd &beta, double &coef0);
void coef_set_zero(int p, int M, Eigen::MatrixXd &beta, Eigen::VectorXd &coef0);
/**
* @brief element-wise product: A.array() * B.array().
*/
Eigen::VectorXd array_product(Eigen::VectorXd &A, Eigen::VectorXd &B, int axis = 0);
/**
* @brief product by specific axis.
*/
Eigen::MatrixXd array_product(Eigen::MatrixXd &A, Eigen::VectorXd &B, int axis = 0);
// Eigen::SparseMatrix<double> array_product(Eigen::SparseMatrix<double> &A, Eigen::VectorXd &B, int axis = 0);
/**
* @brief element-wise division: A.array() / B.array().
*/
void array_quotient(Eigen::VectorXd &A, Eigen::VectorXd &B, int axis = 0);
/**
* @brief division by specific axis.
*/
void array_quotient(Eigen::MatrixXd &A, Eigen::VectorXd &B, int axis = 0);
/**
* @brief A.dot(B)
*/
double matrix_dot(Eigen::VectorXd &A, Eigen::VectorXd &B);
/**
* @brief A.transpose() * B
*/
Eigen::VectorXd matrix_dot(Eigen::MatrixXd &A, Eigen::VectorXd &B);
// void matrix_sqrt(Eigen::MatrixXd &A, Eigen::MatrixXd &B);
// void matrix_sqrt(Eigen::SparseMatrix<double> &A, Eigen::MatrixXd &B);
/**
* @brief Add an all-ones column as the first column in X.
*/
void add_constant_column(Eigen::MatrixXd &X);
/**
* @brief Add an all-ones column as the first column in X.
*/
void add_constant_column(Eigen::SparseMatrix<double> &X);
// void set_nonzeros(Eigen::MatrixXd &X, Eigen::MatrixXd &x);
// void set_nonzeros(Eigen::SparseMatrix<double> &X, Eigen::SparseMatrix<double> &x);
// void overload_ldlt(Eigen::SparseMatrix<double> &X_new, Eigen::SparseMatrix<double> &X, Eigen::VectorXd &Z,
// Eigen::VectorXd &beta); void overload_ldlt(Eigen::MatrixXd &X_new, Eigen::MatrixXd &X, Eigen::VectorXd &Z,
// Eigen::VectorXd &beta);
// void overload_ldlt(Eigen::SparseMatrix<double> &X_new, Eigen::SparseMatrix<double> &X, Eigen::MatrixXd &Z,
// Eigen::MatrixXd &beta); void overload_ldlt(Eigen::MatrixXd &X_new, Eigen::MatrixXd &X, Eigen::MatrixXd &Z,
// Eigen::MatrixXd &beta);
// bool check_ill_condition(Eigen::MatrixXd &M);
/**
* @brief If enable normalization, restore coefficients after fitting.
*/
/**
 * @brief If normalization was enabled, restore coefficients after fitting.
 * @details Undoes the column scaling on every beta (divide by x_norm, multiply
 * by sqrt(n)), then re-derives the intercept depending on normalize_type:
 *  - 1: coef0 = y_mean - <beta, x_mean>
 *  - 2: coef0 = coef0 - <beta, x_mean>
 *  - otherwise: coef0 untouched.
 * Fix: the three branches previously duplicated the identical scaling code;
 * the scaling is now shared and only the intercept update differs.
 */
template <class T2, class T3>
void restore_for_normal(T2 &beta, T3 &coef0, Eigen::Matrix<T2, Dynamic, 1> &beta_matrix,
                        Eigen::Matrix<T3, Dynamic, 1> &coef0_matrix, bool sparse_matrix, int normalize_type, int n,
                        Eigen::VectorXd x_mean, T3 y_mean, Eigen::VectorXd x_norm) {
    if (normalize_type == 0 || sparse_matrix) {
        // no need to restore
        return;
    }
    int sequence_size = beta_matrix.rows();
    // undo column scaling on the best beta
    array_quotient(beta, x_norm, 1);
    beta = beta * sqrt(double(n));
    if (normalize_type == 1) {
        coef0 = y_mean - matrix_dot(beta, x_mean);
    } else if (normalize_type == 2) {
        coef0 = coef0 - matrix_dot(beta, x_mean);
    }
    // same restoration for every entry along the search path
    for (int ind = 0; ind < sequence_size; ind++) {
        array_quotient(beta_matrix(ind), x_norm, 1);
        beta_matrix(ind) = beta_matrix(ind) * sqrt(double(n));
        if (normalize_type == 1) {
            coef0_matrix(ind) = y_mean - matrix_dot(beta_matrix(ind), x_mean);
        } else if (normalize_type == 2) {
            coef0_matrix(ind) = coef0_matrix(ind) - matrix_dot(beta_matrix(ind), x_mean);
        }
    }
    return;
}
/**
 * @brief Logistic probabilities Pi_i = sigmoid(eta_i) for each sample.
 * @details If X has one column fewer than coef, coef(0) is treated as the
 * intercept and the remaining entries as slopes; otherwise coef multiplies X
 * directly. The linear predictor is clamped to [-30, 30] to avoid exp overflow.
 * Fix: the two branches previously duplicated the clamping and sigmoid code
 * (and zero-initialized an immediately overwritten vector); only the linear
 * predictor differs, so everything after it is shared.
 * @note `y` is unused; kept for interface compatibility.
 */
template <class T4>
Eigen::VectorXd pi(T4 &X, Eigen::VectorXd &y, Eigen::VectorXd &coef) {
    int p = coef.size();
    int n = X.rows();
    Eigen::VectorXd one = Eigen::VectorXd::Ones(n);
    Eigen::VectorXd eta;
    if (X.cols() == p - 1) {
        // coef(0) is the intercept, coef(1..p-1) are the slopes
        Eigen::VectorXd intercept = one * coef(0);
        eta = X * (coef.tail(p - 1).eval()) + intercept;
    } else {
        eta = X * coef;
    }
    // clamp to avoid overflow in exp()
    for (int i = 0; i < n; i++) {
        if (eta(i) > 30) {
            eta(i) = 30;
        } else if (eta(i) < -30) {
            eta(i) = -30;
        }
    }
    Eigen::VectorXd expeta = eta.array().exp();
    Eigen::VectorXd Pi = expeta.array() / (one + expeta).array();
    return Pi;
}
template <class T4>
void pi(T4 &X, Eigen::MatrixXd &y, Eigen::MatrixXd &beta, Eigen::VectorXd &coef0, Eigen::MatrixXd &pr) {
int n = X.rows();
Eigen::MatrixXd one = Eigen::MatrixXd::Ones(n, 1);
Eigen::MatrixXd Xbeta = X * beta + one * coef0.transpose();
pr = Xbeta.array().exp();
Eigen::VectorXd sumpi = pr.rowwise().sum();
for (int i = 0; i < n; i++) {
pr.row(i) = pr.row(i) / sumpi(i);
}
// return pi;
};
template <class T4>
void pi(T4 &X, Eigen::MatrixXd &y, Eigen::MatrixXd &coef, Eigen::MatrixXd &pr) {
int n = X.rows();
// Eigen::MatrixXd one = Eigen::MatrixXd::Ones(n, 1);
Eigen::MatrixXd Xbeta = X * coef;
pr = Xbeta.array().exp();
Eigen::VectorXd sumpi = pr.rowwise().sum();
for (int i = 0; i < n; i++) {
pr.row(i) = pr.row(i) / sumpi(i);
}
// return pi;
};
/**
* @brief Add weights information into data.
*/
void add_weight(Eigen::MatrixXd &x, Eigen::VectorXd &y, Eigen::VectorXd weights);
/**
* @brief Add weights information into data.
*/
void add_weight(Eigen::MatrixXd &x, Eigen::MatrixXd &y, Eigen::VectorXd weights);
/**
* @brief Add weights information into data.
*/
void add_weight(Eigen::SparseMatrix<double> &x, Eigen::VectorXd &y, Eigen::VectorXd weights);
/**
* @brief Add weights information into data.
*/
void add_weight(Eigen::SparseMatrix<double> &x, Eigen::MatrixXd &y, Eigen::VectorXd weights);
// Copies X into X_full, optionally prepending an all-ones intercept column.
void add_constant_column(Eigen::MatrixXd &X_full, Eigen::MatrixXd &X, bool add_constant) {
    if (!add_constant) {
        X_full = X.eval();
        return;
    }
    const int n = X.rows();
    const int p = X.cols();
    X_full.resize(n, p + 1);
    X_full.col(0) = Eigen::MatrixXd::Ones(n, 1);
    X_full.rightCols(p) = X.eval();
}
// Sparse counterpart: copies X into X_full and, if requested, inserts a
// 1.0 entry at (r, 0) for every row as the intercept column.
void add_constant_column(Eigen::SparseMatrix<double> &X_full, Eigen::SparseMatrix<double> &X, bool add_constant) {
    if (!add_constant) {
        X_full = X.eval();
        return;
    }
    const int n = X.rows();
    X_full.resize(n, X.cols() + 1);
    X_full.rightCols(X.cols()) = X.eval();
    for (int r = 0; r < n; r++) {
        X_full.insert(r, 0) = 1.0;
    }
}
// Packs [coef0; beta] into a single vector when the model carries an intercept;
// otherwise beta_full is just a copy of beta.
void combine_beta_coef0(Eigen::VectorXd &beta_full, Eigen::VectorXd &beta, double &coef0, bool add_constant) {
    if (!add_constant) {
        beta_full = beta.eval();
        return;
    }
    const int dim = beta.rows();
    beta_full.resize(dim + 1);
    beta_full(0) = coef0;
    beta_full.tail(dim) = beta.eval();
}
// Multi-response variant: stacks coef0 (as the first row) on top of beta.
void combine_beta_coef0(Eigen::MatrixXd &beta_full, Eigen::MatrixXd &beta, Eigen::VectorXd &coef0, bool add_constant) {
    if (!add_constant) {
        beta_full = beta.eval();
        return;
    }
    const int dim = beta.rows();
    const int n_resp = beta.cols();
    beta_full.resize(dim + 1, n_resp);
    beta_full.row(0) = coef0.transpose().eval();
    beta_full.bottomRows(dim) = beta.eval();
}
// Inverse of combine_beta_coef0: splits beta_full back into (coef0, beta);
// with no intercept, coef0 is zero and beta is a copy of beta_full.
void extract_beta_coef0(Eigen::VectorXd &beta_full, Eigen::VectorXd &beta, double &coef0, bool add_constant) {
    if (!add_constant) {
        beta = beta_full.eval();
        coef0 = 0;
        return;
    }
    const int dim = beta_full.rows() - 1;
    coef0 = beta_full(0);
    beta = beta_full.tail(dim).eval();
}
// Multi-response inverse of combine_beta_coef0: first row becomes coef0,
// the remaining rows become beta.
void extract_beta_coef0(Eigen::MatrixXd &beta_full, Eigen::MatrixXd &beta, Eigen::VectorXd &coef0, bool add_constant) {
    if (!add_constant) {
        beta = beta_full.eval();
        coef0 = Eigen::VectorXd::Zero(beta_full.cols());
        return;
    }
    const int dim = beta_full.rows() - 1;
    coef0 = beta_full.row(0).transpose().eval();
    beta = beta_full.bottomRows(dim).eval();
}
// Clamps value into [trunc_range[0], trunc_range[1]] in place.
void trunc(double &value, double *trunc_range) {
    value = (value < trunc_range[0]) ? trunc_range[0] : value;
    value = (value > trunc_range[1]) ? trunc_range[1] : value;
}
// Clamps every entry of vec into [trunc_range[0], trunc_range[1]] in place.
void trunc(Eigen::VectorXd &vec, double *trunc_range) {
    for (int idx = 0; idx < vec.size(); idx++) {
        trunc(vec(idx), trunc_range);
    }
}
// Returns a new matrix equal to m with v added to every row (row-wise broadcast).
Eigen::MatrixXd rowwise_add(Eigen::MatrixXd &m, Eigen::VectorXd &v) { return m.rowwise() + v.transpose(); }
// Returns a new matrix equal to m with the scalar v added to every entry
// (implemented as a row-wise broadcast of a constant row vector).
Eigen::MatrixXd rowwise_add(Eigen::MatrixXd &m, double &v) {
    Eigen::VectorXd offset = Eigen::VectorXd::Ones(m.cols()) * v;
    return m.rowwise() + offset.transpose();
}
#endif // SRC_UTILITIES_H
| 17,518 | 32.755299 | 119 | h |
abess | abess-master/src/workflow.h | /**
* @file workflow.h
* @brief The main workflow for abess.
* @details It receives all inputs from API, runs the whole abess process
* and then return the results as a list.
*/
#ifndef SRC_WORKFLOW_H
#define SRC_WORKFLOW_H
// #define R_BUILD
#ifdef R_BUILD
#include <Rcpp.h>
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]]
using namespace Rcpp;
#else
#include <Eigen/Eigen>
#include "List.h"
#endif
#include <iostream>
#include <vector>
#include "Algorithm.h"
#include "Data.h"
#include "Metric.h"
#include "abessOpenMP.h"
#include "path.h"
#include "screening.h"
#include "utilities.h"
typedef Eigen::Triplet<double> triplet;
using namespace Eigen;
using namespace std;
// <Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd> for Univariate Dense
// <Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double> > for Univariate Sparse
// <Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::MatrixXd> for Multivariable Dense
// <Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::SparseMatrix<double> > for Multivariable Sparse
/**
* @brief The main workflow for abess.
* @tparam T1 for y, XTy, XTone
* @tparam T2 for beta
* @tparam T3 for coef0
* @tparam T4 for X
* @param x sample matrix
* @param y response matrix
* @param n sample size
* @param p number of variables
* @param normalize_type type of normalize
* @param weight weight of each sample
* @param algorithm_type type of algorithm
* @param path_type type of path: 1 for sequencial search and 2 for golden section search
* @param is_warm_start whether enable warm-start
* @param eval_type type of information criterion, or test loss in CV
* @param Kfold number of folds, used for CV
* @param parameters parameters to be selected, including `support_size`, `lambda`
* @param screening_size size of screening
* @param g_index the first position of each group
* @param early_stop whether enable early-stop
* @param thread number of threads used for parallel computing
* @param sparse_matrix whether sample matrix `x` is sparse matrix
* @param cv_fold_id user-specified cross validation division
* @param A_init initial active set
* @param algorithm_list the algorithm pointer
* @return the result of abess, including the best model parameters
*/
template <class T1, class T2, class T3, class T4>
List abessWorkflow(T4 &x, T1 &y, int n, int p, int normalize_type, Eigen::VectorXd weight, int algorithm_type,
                   int path_type, bool is_warm_start, int eval_type, double ic_coef, int Kfold, Parameters parameters,
                   int screening_size, Eigen::VectorXi g_index, bool early_stop, int thread, bool sparse_matrix,
                   Eigen::VectorXi &cv_fold_id, Eigen::VectorXi &A_init,
                   vector<Algorithm<T1, T2, T3, T4> *> algorithm_list) {
#ifndef R_BUILD
    // Fixed seed keeps the standalone (non-R) build reproducible.
    std::srand(123);
#endif
    int algorithm_list_size = algorithm_list.size();
    // Size of the candidate set:
    // usually it is equal to `p`, the number of variable,
    // but it could be different in e.g. RPCA.
    int beta_size = algorithm_list[0]->get_beta_size(n, p);
    // Data packing & normalize:
    // pack & initial all information of data,
    // including normalize.
    Data<T1, T2, T3, T4> data(x, y, normalize_type, weight, g_index, sparse_matrix, beta_size);
    // For model types 1 and 5 the sample weights are folded directly into (x, y).
    if (algorithm_list[0]->model_type == 1 || algorithm_list[0]->model_type == 5) {
        add_weight(data.x, data.y, data.weight);
    }
    // Screening:
    // if there are too many noise variables,
    // screening can choose the `screening_size` most important variables
    // and then focus on them later.
    Eigen::VectorXi screening_A;
    if (screening_size >= 0) {
        screening_A = screening<T1, T2, T3, T4>(data, algorithm_list, screening_size, beta_size,
                                                parameters.lambda_list(0), A_init);
    }
    // Prepare for CV:
    // if CV is enable,
    // specify train and test data,
    // and initialize the fitting argument inside each fold.
    Metric<T1, T2, T3, T4> *metric = new Metric<T1, T2, T3, T4>(eval_type, ic_coef, Kfold);
    if (Kfold > 1) {
        metric->set_cv_train_test_mask(data, data.n, cv_fold_id);
        metric->set_cv_init_fit_arg(beta_size, data.M);
        // metric->set_cv_initial_model_param(Kfold, data.p);
        // metric->set_cv_initial_A(Kfold, data.p);
        // metric->set_cv_initial_coef0(Kfold, data.p);
        // if (model_type == 1)
        // metric->cal_cv_group_XTX(data);
    }
    // Fitting and loss:
    // follow the search path,
    // fit on each parameter combination,
    // and calculate ic/loss.
    vector<Result<T2, T3>> result_list(Kfold);
    if (path_type == 1) {
        // sequential search: one task per fold, run in parallel
#pragma omp parallel for
        for (int i = 0; i < Kfold; i++) {
            sequential_path_cv<T1, T2, T3, T4>(data, algorithm_list[i], metric, parameters, early_stop, i, A_init,
                                               result_list[i]);
        }
    } else {
        // if (algorithm_type == 5 || algorithm_type == 3)
        // {
        //     double log_lambda_min = log(max(lambda_min, 1e-5));
        //     double log_lambda_max = log(max(lambda_max, 1e-5));
        //     result = pgs_path(data, algorithm, metric, s_min, s_max, log_lambda_min, log_lambda_max, powell_path,
        //     nlambda);
        // }
        // golden section search
        gs_path<T1, T2, T3, T4>(data, algorithm_list, metric, parameters, A_init, result_list);
    }
    // Release the per-fold algorithm working state.
    for (int k = 0; k < Kfold; k++) {
        algorithm_list[k]->clear_setting();
    }
    // Get best model && fit best model:
    // choose the best model with lowest ic/loss
    // and if CV, refit on full data.
    int min_loss_index = 0;
    int sequence_size = (parameters.sequence).size();
    Eigen::Matrix<T2, Dynamic, 1> beta_matrix(sequence_size, 1);
    Eigen::Matrix<T3, Dynamic, 1> coef0_matrix(sequence_size, 1);
    Eigen::Matrix<VectorXd, Dynamic, 1> bd_matrix(sequence_size, 1);
    Eigen::MatrixXd ic_matrix(sequence_size, 1);
    Eigen::MatrixXd test_loss_sum = Eigen::MatrixXd::Zero(sequence_size, 1);
    Eigen::MatrixXd train_loss_matrix(sequence_size, 1);
    Eigen::MatrixXd effective_number_matrix(sequence_size, 1);
    if (Kfold == 1) {
        // not CV: choose lowest ic
        beta_matrix = result_list[0].beta_matrix;
        coef0_matrix = result_list[0].coef0_matrix;
        ic_matrix = result_list[0].ic_matrix;
        train_loss_matrix = result_list[0].train_loss_matrix;
        effective_number_matrix = result_list[0].effective_number_matrix;
        ic_matrix.col(0).minCoeff(&min_loss_index);
    } else {
        // CV: choose lowest test loss (averaged over folds)
        for (int i = 0; i < Kfold; i++) {
            test_loss_sum += result_list[i].test_loss_matrix;
        }
        test_loss_sum /= ((double)Kfold);
        test_loss_sum.col(0).minCoeff(&min_loss_index);
        Eigen::VectorXi used_algorithm_index = Eigen::VectorXi::Zero(algorithm_list_size);
        // refit on full data
#pragma omp parallel for
        for (int ind = 0; ind < sequence_size; ind++) {
            int support_size = parameters.sequence(ind).support_size;
            double lambda = parameters.sequence(ind).lambda;
            // each OpenMP thread reuses its own Algorithm instance
            int algorithm_index = omp_get_thread_num();
            used_algorithm_index(algorithm_index) = 1;
            T2 beta_init;
            T3 coef0_init;
            Eigen::VectorXi A_init;  // start from a clear A_init (not from the given one)
            coef_set_zero(beta_size, data.M, beta_init, coef0_init);
            Eigen::VectorXd bd_init = Eigen::VectorXd::Zero(data.g_num);
            // warmstart from CV's result (average over folds)
            for (int j = 0; j < Kfold; j++) {
                beta_init = beta_init + result_list[j].beta_matrix(ind) / Kfold;
                coef0_init = coef0_init + result_list[j].coef0_matrix(ind) / Kfold;
                bd_init = bd_init + result_list[j].bd_matrix(ind) / Kfold;
            }
            // fitting
            algorithm_list[algorithm_index]->update_sparsity_level(support_size);
            algorithm_list[algorithm_index]->update_lambda_level(lambda);
            algorithm_list[algorithm_index]->update_beta_init(beta_init);
            algorithm_list[algorithm_index]->update_coef0_init(coef0_init);
            algorithm_list[algorithm_index]->update_bd_init(bd_init);
            algorithm_list[algorithm_index]->update_A_init(A_init, data.g_num);
            algorithm_list[algorithm_index]->fit(data.x, data.y, data.weight, data.g_index, data.g_size, data.n, data.p,
                                                 data.g_num);
            // update results
            beta_matrix(ind) = algorithm_list[algorithm_index]->get_beta();
            coef0_matrix(ind) = algorithm_list[algorithm_index]->get_coef0();
            train_loss_matrix(ind) = algorithm_list[algorithm_index]->get_train_loss();
            ic_matrix(ind) = metric->ic(data.n, data.M, data.g_num, algorithm_list[algorithm_index]);
            effective_number_matrix(ind) = algorithm_list[algorithm_index]->get_effective_number();
        }
        for (int i = 0; i < algorithm_list_size; i++) {
            if (used_algorithm_index(i) == 1) {
                algorithm_list[i]->clear_setting();
            }
        }
    }
    // Best result
    double best_support_size = parameters.sequence(min_loss_index).support_size;
    double best_lambda = parameters.sequence(min_loss_index).lambda;
    T2 best_beta;
    T3 best_coef0;
    double best_train_loss, best_ic, best_test_loss;
    best_beta = beta_matrix(min_loss_index);
    best_coef0 = coef0_matrix(min_loss_index);
    best_train_loss = train_loss_matrix(min_loss_index);
    best_ic = ic_matrix(min_loss_index);
    best_test_loss = test_loss_sum(min_loss_index);
    // Restore for normal:
    // restore the changes if normalization is used.
    restore_for_normal<T2, T3>(best_beta, best_coef0, beta_matrix, coef0_matrix, sparse_matrix, data.normalize_type,
                               data.n, data.x_mean, data.y_mean, data.x_norm);
    // Store in a list for output
    List out_result;
#ifdef R_BUILD
    out_result = List::create(
        Named("beta") = best_beta, Named("coef0") = best_coef0, Named("train_loss") = best_train_loss,
        Named("ic") = best_ic, Named("lambda") = best_lambda, Named("beta_all") = beta_matrix,
        Named("coef0_all") = coef0_matrix, Named("train_loss_all") = train_loss_matrix, Named("ic_all") = ic_matrix,
        Named("effective_number_all") = effective_number_matrix, Named("test_loss_all") = test_loss_sum);
    if (path_type == 2) {
        out_result.push_back(parameters.support_size_list, "sequence");
    }
#else
    out_result.add("beta", best_beta);
    out_result.add("coef0", best_coef0);
    out_result.add("train_loss", best_train_loss);
    out_result.add("test_loss", best_test_loss);
    out_result.add("ic", best_ic);
    out_result.add("lambda", best_lambda);
    // out_result.add("beta_all", beta_matrix);
    // out_result.add("coef0_all", coef0_matrix);
    // out_result.add("train_loss_all", train_loss_matrix);
    // out_result.add("ic_all", ic_matrix);
    // out_result.add("test_loss_all", test_loss_sum);
#endif
    // Restore for screening
    // restore the changes if screening is used:
    // map the coefficients fitted on the screened subset back to full length.
    if (screening_size >= 0) {
        T2 beta_screening_A;
        T2 beta;
        T3 coef0;
        beta_size = algorithm_list[0]->get_beta_size(n, p);
        coef_set_zero(beta_size, data.M, beta, coef0);
#ifndef R_BUILD
        out_result.get_value_by_name("beta", beta_screening_A);
        slice_restore(beta_screening_A, screening_A, beta);
        out_result.add("beta", beta);
        out_result.add("screening_A", screening_A);
#else
        beta_screening_A = out_result["beta"];
        slice_restore(beta_screening_A, screening_A, beta);
        out_result["beta"] = beta;
        out_result.push_back(screening_A, "screening_A");
#endif
    }
    delete metric;
    // Return the results
    return out_result;
}
#endif // SRC_WORKFLOW_H
| 12,239 | 39 | 120 | h |
null | RIQ-main/README.md | # Content
<!-- TOC -->
- [Overview](#overview)
    - [Package Architecture](#package-architecture)
- [Dataset](#dataset)
- [ImageNet Classification](#ImageNet)
- [COCO Detection](#COCO)
- [SQuAD Dataset](#SQuAD)
- [Environment Requirements](#environment-requirements)
- [Quick Start](#quick-start)
- [Script Detailed Description](#script-detailed-description)
<!-- /TOC -->
# Overview
This folder holds the code for Rotation-Invariant quantization (RIQ) technique presented in " "
The implementation supports evaluation of the quantization process for the models: VGG, resnet, alexnet, ViT, YOLO, and distilBERT with respect to their tasks
# Package Architecture
The module tree looks as follows, where the last four folder are auto-generated at runtime if needed.
```shell
├── evaluate_quantization.sh
├── evaluate_nlp.py
├── compare_cv.py
├── README.md
├── utils
│ ├── ans.py
│ ├── dataset.py
│ ├── image_dir.py
│ ├── onnx_bridge.py
│ ├── presets.py
│ ├── quantize.py
│ ├── quantize_bert.py
│ ├── quantize_yolo.py
│ ├── download_alexnet.py
│ ├── download_BERT.py
│ ├── download_resnet.py
│ ├── download_VGG.py
│ ├── download_ViT.py
│ └── download_YOLO.py
├── empty_calibration
├── logs
├── models
└── third_party
```
The main script is evaluate_quantization.sh which spawns the relevant quantization and evaluation.
The models folder stores the onnx models which are automatically downloaded during the evaluation.
Logs are saved within the logs folder.
In the case of YOLO, additional code is cloned into the third_party folder.
The quantization algorithm itself is implemented in utils/quantize.py, and a supplemental ANS mechanism in utils/ans.py further compresses the quantized model in order to measure the size of the fully compressed model.
# Dataset
# ImageNet Dataset
To evaluate alexnet, VGG, Resnet and ViT we use ImageNet classification task.
Dataset used: [ImageNet2012](<https://image-net.org/challenges/LSVRC/2012/>)
- Dataset size: 224*224 color images in 1000 classes
- Train: 1,281,167 images
- Test: 50,000 images
- Data format:JPEG
- **Only the test part of the dataset is used in our script**
The directory tree looks like this with 1000 folders for the different class, and each folder contains \*.JPEG images
```shell
ImageNet2012
├── train
│ ├── n01440764
│ ├── n01443537
│ ├── .
│ ├── .
│ ├── .
│ ├── n13133613
│ └── n15075141
└── val
├── n01440764
├── n01443537
├── .
├── .
├── .
├── n13133613
└── n15075141
```
# COCO Dataset
To evaluate YOLO detection we use the COCO Detection task.
Dataset used: [COCO2017](<https://cocodataset.org/#download>)
- Dataset size, 19G
- Train: 18G, 118000 images
- Val: 1G, 5000 images
- Annotations: 241M, instances, captions, person_keypoints etc
- Data format: image and json files
- **Only the Val part of the dataset is used in our script**
# SQuAD Dataset
To evaluate the distilbert model we use SQuAD v1.1 Questions and Answers Task.
Dataset used: [SQuAD v1.1](<https://rajpurkar.github.io/SQuAD-explorer/explore/1.1/dev/>)
- Val Dataset size:4.8M, 10570 questions + contexts
- Data format: json file
dev-v1.1.json
- **Only the Val part of the dataset is used in our script**
# Environment Requirements
Required python libraries are given in prerequisites.txt file
# Quick Start
After cloning this repository simply run
```bash
./evaluate_quantization.sh MODEL [-v VALIDATION_DATASET] [-d distortion] [-c calibration dataset]
```
# Script Detailed Description
The script downloads a pretrained model, quantize it and evaluate the quantization based on the model's task.
The evaluation/validation dataset must be downloaded in advance by the user and its path should be provided via the VALIDATION_DATASET parameter.
A fully compressed model (quantized+compressed) is not saved, since there is no python-based framework that can use such file.
# Citation
Please site our paper if you find this repository relevant to your research
```
@article{kampeas2023rotation,
title={Rotation Invariant Quantization for Model Compression},
author={Joseph Kampeas and Yury Nahshan and Hanoch Kremer and Gil Lederman and Shira Zaloshinski and Zheng Li and Emir Haleva},
year={2023},
eprint={2303.03106},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
| 4,363 | 29.732394 | 208 | md |
null | RIQ-main/compare_cv.py | """compare between the accuracy of the quant model and the base model"""
import os
import sys
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
import torch
import torchvision
import numpy as np
from utils import presets
from utils.onnx_bridge import OnnxBridge
from utils.quantize import get_quantized_model
from utils.dataset import prepare_dataset_images
def measure_cos_err(lhs, rhs):
    """Cosine similarity between two arrays of identical shape."""
    assert lhs.shape == rhs.shape, 'shapes are not the same!'
    # Normalize both operands to unit length, then take the dot product.
    lhs_unit = lhs / np.linalg.norm(lhs)
    rhs_unit = rhs / np.linalg.norm(rhs)
    return lhs_unit @ rhs_unit
def compare_function(orig_output, quant_output, _):
    """compare function for CV accuracy models"""
    # Compare the first output tensors of both models as flat vectors.
    reference = orig_output[0].flatten()[:]
    candidate = quant_output[0].flatten()[:]
    return measure_cos_err(reference, candidate)
def data_loader(valdir):
    """ loading data """
    # Standard ImageNet eval preprocessing (resize to 256, center-crop 224).
    preprocessing = presets.ClassificationPresetEval(crop_size=224)
    # valdir must follow the ImageFolder layout: one sub-folder per class.
    dataset_test = torchvision.datasets.ImageFolder(valdir, preprocessing)
    return dataset_test
def eval_model(base_model, quant_model, testloader):
    """ eval the model and compute accuracy"""
    # Accumulators over the whole validation set.
    correct_origin = 0      # correct top-1 predictions of the original model
    correct_quant = 0       # correct top-1 predictions of the quantized model
    total = 0               # number of images seen so far
    i = 0                   # batch counter (for progress printing only)
    self_similarity = 0     # how often both models agree with each other
    total_cos_error = 0     # accumulated per-image cosine similarity of logits
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs_base = list()
            oputputs_quant = list()
            # Models are ONNX sessions with a fixed batch of 1, so feed
            # one image at a time as a (1, C, H, W) numpy array.
            for img in images:
                img = np.array([img.numpy()])
                output_quant = quant_model(img)[0][0, :]
                oputputs_quant.append(output_quant)
                output_base = base_model(img)[0][0, :]
                outputs_base.append(output_base)
                total_cos_error += measure_cos_err(output_quant, output_base)
            # Top-1 class per image for each model.
            _, predicted_base = torch.max(torch.nn.Softmax(dim=-1)
                                          (torch.Tensor(np.array(outputs_base))), 1)
            _, predicted_quant = torch.max(torch.nn.Softmax(dim=-1)
                                           (torch.Tensor(np.array(oputputs_quant))), 1)
            total += labels.size(0)
            correct_quant += (torch.sum(predicted_quant == labels)).item()
            correct_origin += (torch.sum(predicted_base == labels)).item()
            self_similarity += (predicted_base == predicted_quant).sum().item()
            i += 1
            print("iteration ", i, "self similarity: ", self_similarity,
                  100 * correct_origin / total, 100 * correct_quant / total)
    # Convert running counts to percentages for the final report.
    correct_origin = 100 * correct_origin / total
    correct_quant = 100 * correct_quant / total
    self_similarity = 100 * self_similarity / total
    total_cos_error = 100 * total_cos_error /total
    print(f'Accuracy of the original network on the 50000 test images: {correct_origin} %')
    print(f'Accuracy of the quantized network on the 50000 test images: {correct_quant} %')
    print(f'self similarity model { self_similarity }%')
    print(f'measure cos distance: {total_cos_error}%')
def main():
    """main"""
    # CLI: model_name base_onnx imagenet_root calibration_dir distortion
    model_name = sys.argv[1]  # NOTE(review): parsed but unused in this function
    base_onnx = sys.argv[2]
    data_path = sys.argv[3]
    dataset_calibration_path = sys.argv[4]
    # Glob pattern for the calibration images inside the given directory.
    dataset_calibration_path = dataset_calibration_path + '*.JPEG'
    distortion = float(sys.argv[5])
    batch_size = 128
    base_model = OnnxBridge(base_onnx)
    # The quantized model is saved next to the original, distortion in the name.
    quant_onnx_fn = base_onnx.replace(".onnx", "_" + str(distortion) + ".onnx")
    dataset_calibration = prepare_dataset_images(dataset_calibration_path, base_onnx)
    print("Quantizing with distortion constraint", distortion)
    quant_model = get_quantized_model(base_onnx, dataset_calibration, distortion=distortion,
                                      compare_function=compare_function)
    quant_model.save(quant_onnx_fn)
    valdir = os.path.join(data_path, 'val')
    if os.path.exists(valdir):
        dataset_test = data_loader(valdir)
        testloader = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size,
                                                 num_workers=4, shuffle=True)
        print("Measuring Accuracy on validation dataset")
        eval_model(base_model, quant_model, testloader)
    else:
        print("No validation dataset was provided. Exiting without evaluating accuracy.")
| 4,222 | 41.23 | 92 | py |
null | RIQ-main/evaluate_nlp.py | """evaluate nlp"""
import sys
import os
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
from collections import Counter
import re
import string
import numpy as np
import torch
from transformers import DistilBertTokenizer
from transformers.data.processors.squad import SquadV1Processor
from utils.onnx_bridge import OnnxBridge
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased-distilled-squad')
def get_prediction(model, qid, size_input):
    """Run the QA model on one SQuAD question and return the answer string.

    model      -- OnnxBridge callable expecting [input_ids, attention_mask]
    qid        -- SQuAD question id (looked up in the module-level examples)
    size_input -- fixed sequence length of the exported ONNX graph
    """
    question = examples[qid_to_example_index[qid]].question_text
    context = examples[qid_to_example_index[qid]].context_text
    inputs = tokenizer(question, context, return_tensors='pt')
    size = dict(inputs)['input_ids'][0].size()[0]
    # The ONNX model has a static input length: truncate long sequences,
    # zero-pad short ones (both ids and attention mask).
    if size > size_input:
        input_ids = dict(inputs)['input_ids'][0, :size_input]
        attention_mask = dict(inputs)['attention_mask'][0, : size_input]
    else:
        input_ids = torch.cat([dict(inputs)['input_ids'][0],
                               torch.zeros(size_input - size, dtype=torch.int64)])
        attention_mask = torch.cat(
            [dict(inputs)['attention_mask'][0], torch.zeros(size_input - size, dtype=torch.int64)])
    inputs2 = [np.array(torch.reshape(input_ids, (1, size_input), )),
               np.array(torch.reshape(attention_mask, (1, size_input)))]
    outputs = model(inputs2)
    # outputs[0]/outputs[1] are start/end logits; pick the argmax span.
    answer_start = torch.argmax(torch.Tensor(outputs[0]))
    answer_end = torch.argmax(torch.Tensor(outputs[1])) + 1
    answer = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(inputs['input_ids'][0][answer_start:answer_end]))
    return answer
def normalize_text(s):
    """Canonicalize text for answer comparison: lower-case, fold accents,
    drop punctuation and English articles, collapse whitespace."""
    text = s.lower()
    # Fold common accented latin letters onto their ASCII counterparts.
    text = re.sub(r'[àáâãäåạằ]', 'a', text)
    text = re.sub(r'[èéêë]', 'e', text)
    text = re.sub(r'[ìíîï]', 'i', text)
    text = re.sub(r'[òóôõö]', 'o', text)
    text = re.sub(r'[ùúûü]', 'u', text)
    text = re.sub(r'[ć]', 'c', text)
    text = re.sub(r'[ś]', 's', text)
    text = re.sub(r'[ñńǹň]', 'n', text)
    # Remove punctuation characters.
    punctuation = set(string.punctuation)
    text = "".join(ch for ch in text if ch not in punctuation)
    # Remove English articles (whole words only).
    regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
    text = re.sub(regex, " ", text)
    # Collapse runs of whitespace into single spaces.
    return " ".join(text.split())
def get_gold_answers(example):
    """Collect the reference answers of a SQuAD example, re-tokenized so they
    compare fairly against model output; falls back to [""] when empty."""
    gold_answers = []
    for answer in example.answers:
        if not answer["text"]:
            continue
        # Round-trip through the tokenizer ([1:-2] drops special tokens).
        token_ids = tokenizer(answer["text"], "", return_tensors='pt')['input_ids'][0][1:-2]
        gold_answers.append(
            tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(token_ids)))
    if not gold_answers:
        gold_answers = [""]
    return gold_answers
def compute_exact_match(prediction, truth):
    """Return 1 when prediction and truth match after normalization, else 0."""
    normalized_pred = normalize_text(prediction)
    normalized_truth = normalize_text(truth)
    return int(normalized_pred == normalized_truth)
def compute_f1(prediction, truth):
    """Token-level F1 between a predicted answer and a reference answer."""
    pred_tokens = normalize_text(prediction).split()
    truth_tokens = normalize_text(truth).split()
    # If either side is empty, F1 is 1 only when both are empty.
    if not pred_tokens or not truth_tokens:
        return int(pred_tokens == truth_tokens)
    shared = Counter(pred_tokens) & Counter(truth_tokens)
    num_shared = sum(shared.values())
    if num_shared == 0:
        return 0
    precision = num_shared / len(pred_tokens)
    recall = num_shared / len(truth_tokens)
    return 2 * (precision * recall) / (precision + recall)
def main():
    """main"""
    # examples / qid_to_example_index are read by get_prediction(), hence global.
    global examples
    global qid_to_example_index
    # CLI: onnx_model_path squad_dataset_dir
    model_path = sys.argv[1]
    dataset_path = sys.argv[2]
    model = OnnxBridge(model_path)
    dims = model.get_inputs()
    # Fixed sequence length baked into the exported ONNX graph.
    size_input = dims[1][1]
    em_score = 0
    f1_score = 0
    total = 0
    processor = SquadV1Processor()
    examples = processor.get_dev_examples(dataset_path, filename="dev-v1.1.json")
    qid_to_example_index = {example.qas_id: i for i, example in enumerate(examples)}
    qid_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
    # Evaluate only questions that actually have reference answers.
    answer_qids = [qas_id for qas_id, has_answer in qid_to_has_answer.items() if has_answer]
    with torch.no_grad():
        for i in answer_qids:
            example = examples[qid_to_example_index[i]]
            prediction = get_prediction(model, i, size_input)
            gold_answers = get_gold_answers(example)
            # SQuAD metric: take the best score over all reference answers.
            em_score += max(compute_exact_match(prediction, answer) for answer in gold_answers)
            f1_score += max(compute_f1(prediction, answer) for answer in gold_answers)
            total += 1
    print('f1=', f1_score / total)
    print('em=', em_score / total)
main()
| 4,835 | 32.351724 | 99 | py |
null | RIQ-main/evaluate_quantization.sh | #!/bin/bash
# Print CLI help and abort. Called on any invalid invocation.
usage() {
  echo "evaluate_quantization.sh MODEL [ -v VALIDATION_DATASET] [-d DISTORTION] [-c CALIBRATION_DATA]"
  echo " MODEL can be either one of the following models: VGG, resnet, alexnet, ViT, YOLO, or BERT"
  echo " alternatively an onnx model path + filename"
  echo " VALIDATION_DATASET is a path to an Imagenet folder for the case of VGG, resnet, alexnet or ViT"
  echo " a path to a COCO folder for the case of YOLO"
  echo " a path to a XXX folder for the case of BERT"
  echo " DISTORTION is a cosine similarity constraint for calibration. a small value << 1. Default=0.005"
  echo " CALIBRATION_DATA is a set of samples that represent the data. Default means random data will be generated"
  echo " note that BERT quantization does not support a calibration dataset"
  exit -1
}
# Parse the optional flags (-d distortion, -c calibration dir, -v validation dir)
# and set up default directories. Defaults point at an empty dir so callers can
# test "was a dataset provided" by comparing against $(pwd)/empty_path.
checkopts()
{
  DISTORTION=0.005
  mkdir -p empty_path
  mkdir -p models
  mkdir -p logs
  CAL_DATASET=$(pwd)/empty_path
  VAL_DATASET=$(pwd)/empty_path
  while getopts 'd:c:v:' opt
  do
    case "${opt}" in
      d)
        DISTORTION=$OPTARG
        ;;
      c)
        CAL_DATASET=$OPTARG
        ;;
      v)
        VAL_DATASET=$OPTARG
        ;;
      *)
        echo "Unknown option ${opt}!"
        # Fix: the script defines 'usage', not 'display_usage' — the old call
        # died with "command not found" instead of printing help.
        usage
        exit 1
    esac
  done
}
# --- Argument handling: resolve MODEL / MODEL_ONNX from $1 -------------------
if (($# < 1)); then
  usage
fi
# Options start after the positional model argument.
checkopts ${@:3}
if [[ -f $1 ]]; then
  # $1 is a path to an existing ONNX file.
  MODEL_ONNX=$1
  MODEL=`basename ${MODEL_ONNX} .onnx`
else
  # $1 is a known model name: download a pretrained ONNX on first use.
  MODEL=$1
  if [[ $1 == "ViT" ]] || [[ $1 == "alexnet" ]] || [[ $1 == "resnet" ]] || [[ $1 == "VGG" ]] || [[ $1 == "YOLO" ]] || [[ $1 == "BERT" ]]; then
    MODEL_ONNX=models/$1.onnx
    if [[ ! -f "$MODEL_ONNX" ]]; then
      echo "Downloading pretrained $MODEL, this might take a while"
      python utils/download_$MODEL.py || exit 1
    fi
  fi
fi
if [[ ! -f "$MODEL_ONNX" ]]; then
  echo "Does not support model $1 or Could not find valid ONNX file in path:" $1
  usage
fi
echo 'model:' $MODEL
echo 'onnx file name:' $MODEL_ONNX
echo 'validation dataset:' $VAL_DATASET
echo 'calibration dataset:' $CAL_DATASET
echo 'distortion:' ${DISTORTION}
if [[ ! -d "$CAL_DATASET" ]]; then
  echo "Could not find valid CALIBRATION_DATASET directory in path:" $CAL_DATASET
  usage
fi
# Log file name records whether a real calibration set was used.
if (($# > 3)); then
  LOG_FILE=logs/log_${MODEL}_${DISTORTION}.txt
else
  LOG_FILE=logs/log_${MODEL}_${DISTORTION}_random.txt
fi
LOG_FILE=$(realpath $LOG_FILE)
MODEL_ONNX=$(realpath $MODEL_ONNX)
# --- Dispatch per model family ----------------------------------------------
if [[ $MODEL == *"VGG"* ]] || [[ $MODEL == *"resnet"* ]] || [[ $MODEL == "ViT" ]] || [[ $MODEL == "alexnet" ]]; then
  # Image classification: quantize + evaluate in one python entry point.
  python -u compare_cv.py $MODEL $MODEL_ONNX $VAL_DATASET ${CAL_DATASET[*]} $DISTORTION| tee ${LOG_FILE} || exit 1
  awk '/Actual CR:/ {print; }' ${LOG_FILE} >> ${LOG_FILE}
elif [[ $MODEL == *"YOLO"* ]]; then
  # Detection: reuse Ultralytics' val.py on both original and quantized models.
  if [[ ! -d third_party/yolov5 ]]; then
    echo "cloning third party repo Ultralytics"
    mkdir -p third_party/datasets
    cd third_party
    git clone https://github.com/ultralytics/yolov5.git
    cd -
  fi
  PATH_QUANT_MODEL_ONNX=$(realpath 'models/'$MODEL'_quant.onnx')
  python -u utils/quantize_yolo.py $MODEL_ONNX $PATH_QUANT_MODEL_ONNX $DISTORTION $CAL_DATASET | tee ${LOG_FILE}
  cd third_party
  mkdir -p datasets
  # Fix: -f so the first run (no symlink yet) does not print an error.
  rm -f datasets/coco
  ln -s $VAL_DATASET datasets/coco
  echo "Evaluating Quantized model" >> ${LOG_FILE}
  python -u yolov5/val.py --weights $PATH_QUANT_MODEL_ONNX --data yolov5/data/coco.yaml | tee -a ${LOG_FILE}
  echo "Evaluating Original model" >> ${LOG_FILE}
  python -u yolov5/val.py --weights $MODEL_ONNX --data yolov5/data/coco.yaml | tee -a ${LOG_FILE}
  cd -
elif [[ "${MODEL,,}" == *"bert"* ]]; then
  if [[ $CAL_DATASET != $(pwd)/empty_path ]]; then
    # Fix: typo "supoort" -> "support" in the user-facing message.
    echo "BERT quantization does not support a calibration dataset"
    usage
  fi
  PATH_QUANT_MODEL_ONNX=$(realpath 'models/'$MODEL'_quant.onnx')
  python -u utils/quantize_bert.py $MODEL_ONNX $PATH_QUANT_MODEL_ONNX $DISTORTION $VAL_DATASET | tee ${LOG_FILE}
  echo "Evaluating Quantized model" >> ${LOG_FILE}
  python -u evaluate_nlp.py $PATH_QUANT_MODEL_ONNX $VAL_DATASET | tee -a ${LOG_FILE}
  echo "Evaluating Original model" >> ${LOG_FILE}
  python -u evaluate_nlp.py $MODEL_ONNX $VAL_DATASET | tee -a ${LOG_FILE}
else
  usage
fi
null | RIQ-main/utils/ans.py | """ans"""
import numpy as np
def first_1_index(val):
    """Index of the highest set bit of val (0 for val <= 1)."""
    index = 0
    # Shift right until only the top bit remains, counting the shifts.
    while val > 1:
        val >>= 1
        index += 1
    return index
def output_nb_bits(state, nb_bits):
    """Emit the low nb_bits of state as a zero-padded bit string ("" if 0)."""
    # Mask off everything above the requested width.
    low = state & ((1 << nb_bits) - 1)
    if nb_bits <= 0:
        return ""
    # Fixed-width binary rendering, left-padded with zeros.
    return format(low, '0%db' % nb_bits)
def bits_to_state(bit_stream, nb_bits):
    """Consume nb_bits from the tail of bit_stream; return (value, remainder)."""
    if nb_bits == 0 or not bit_stream:
        return 0, bit_stream
    if nb_bits == len(bit_stream):
        # The whole stream is consumed.
        return int(bit_stream, 2), ""
    # Read from the tail; the head stays for subsequent reads.
    head = bit_stream[:-nb_bits]
    tail = bit_stream[-nb_bits:]
    return int(tail, 2), head
def decode_symbol(state, bit_stream, state_t):
    """Decode one symbol from the stream; return (symbol, new_state, stream)."""
    entry = state_t[state]
    # Read the entry's bit count from the stream tail to rebuild the state.
    rest, bit_stream = bits_to_state(bit_stream, entry['nbBits'])
    return entry['symbol'], entry['newX'] + rest, bit_stream
def efficient_decode_symbol(bit_stream, end, state, state_t):
    """Decode one symbol without slicing the stream; `end` is the read cursor.

    Returns (symbol, new_state, new_end)."""
    entry = state_t[state]
    symbol = entry['symbol']
    nb_bits = entry['nbBits']
    if nb_bits == 0 or end == 0:
        # Nothing to read; only the table transition applies.
        return symbol, entry['newX'], end
    start = max(0, end - nb_bits)
    rest = int(bit_stream[start:end], 2)
    return symbol, entry['newX'] + rest, start
class TabledANS:
    """Table-based asymmetric numeral system (tANS) entropy coder.

    Builds coding/decoding tables of size 2**table_log from a symbol
    frequency dict and provides encode/decode over symbol streams.
    """
    def __init__(self, symbol_occurrences, table_log=5):
        """symbol_occurrences maps symbol -> count; counts are normalized
        so they sum exactly to the table size (2**table_log)."""
        self.table_log = table_log
        self.table_size = 1 << table_log
        if self.table_size < len(symbol_occurrences):
            raise RuntimeError("Table size {} less than number of symbols {}"
                               .format(self.table_size, len(symbol_occurrences)))
        freq_sum = np.sum(list(symbol_occurrences.values()))
        if freq_sum != self.table_size:
            # Normalize frequencies table
            # Scale counts to the table size; every symbol keeps at least 1 slot.
            freq_norm = \
                np.array([max(1, np.round(self.table_size * symbol_occurrences[sym] / freq_sum))
                          for sym in symbol_occurrences.keys()])
            freq_sum_norm = np.sum(freq_norm)
            reminder = self.table_size - freq_sum_norm
            #print(reminder)
            while reminder < 0:
                #shrink the frequencies to fit the table
                max_ix = np.argmax(freq_norm)
                freq_norm[max_ix] -= 1
                reminder += 1
            if reminder > 0:
                #grow the frequencies to fit the table
                max_ix = np.argmax(freq_norm)
                freq_norm[max_ix] += reminder
            assert freq_norm.sum() == self.table_size
            symbol_occurrences = dict([(k, int(freq_norm[i]))
                                       for i, k in enumerate(symbol_occurrences.keys())])
        ####
        # Define the Initial Positions of States in StateList.
        ####
        symbol_list = [symbol for symbol, occcurences in symbol_occurrences.items()]
        # cumulative[u] = total count of all symbols before symbol u.
        cumulative = [0 for _ in range(len(symbol_list)+2)]
        for u in range(1, len(symbol_occurrences.items())+ 1):
            cumulative[u] = cumulative[u - 1] + list(symbol_occurrences.items())[u-1][1]
        cumulative[-1] = self.table_size + 1
        #####
        # Spread Symbols to Create the States Table
        #####
        high_thresh = self.table_size - 1
        state_table = [0 for _ in range(self.table_size)]
        table_mask = self.table_size - 1
        # Coprime step scatters each symbol's slots across the whole table.
        step = ((self.table_size >> 1) + (self.table_size >> 3) + 3)
        pos = 0
        for symbol, occurrences in symbol_occurrences.items():
            for i in range(occurrences):
                state_table[pos] = symbol
                pos = (pos + step) & table_mask
                #while pos > high_thresh: print("Huuuh") position = (pos + step) & table_mask
        assert pos == 0
        #####
        # Build Coding Table from State Table
        #####
        #outputBits = [0 for _ in range(self.tableSize)]
        self.coding_table = [0 for _ in range(self.table_size)]
        cumulative_cp = cumulative.copy()
        for i in range(self.table_size):
            s = state_table[i]
            index = symbol_list.index(s)
            self.coding_table[cumulative_cp[index]] = self.table_size + i
            cumulative_cp[index] += 1
        #####
        # Create the Symbol Transformation Table
        #####
        # Per-symbol encode helpers: bit-count delta and state-lookup offset.
        total = 0
        self.symbol_tt = {}
        for symbol, occurrences in symbol_occurrences.items():
            self.symbol_tt[symbol] = {}
            if occurrences == 1:
                self.symbol_tt[symbol]['deltaNbBits'] = (self.table_log << 16) - (1 << self.table_log)
                self.symbol_tt[symbol]['deltaFindState'] = total - 1
            elif occurrences > 0:
                max_bits_out = self.table_log - first_1_index(occurrences - 1)
                min_state_plus = occurrences << max_bits_out
                self.symbol_tt[symbol]['deltaNbBits'] = (max_bits_out << 16) - min_state_plus
                self.symbol_tt[symbol]['deltaFindState'] = total - occurrences
            total += occurrences
            #print("deltaNbBits of symbol ", symbol, " is ", self.symbolTT[symbol]['deltaNbBits'])
        # print(self.symbolTT)
        #####
        # Generate a Decoding Table
        #####
        self.decode_table = [{} for _ in range(self.table_size)]
        nextt = list(symbol_occurrences.items())
        for i in range(self.table_size):
            t = {}
            t['symbol'] = state_table[i]
            index = symbol_list.index(t['symbol'])
            x = nextt[index][1]
            nextt[index] = (nextt[index][0], nextt[index][1] + 1)
            t['nbBits'] = self.table_log - first_1_index(x)
            t['newX'] = (x << t['nbBits']) - self.table_size
            self.decode_table[i] = t
            #print(t['symbol'] - min(state_table), t['nbBits'], t['newX'])
        #print("decodeTable size is ", len(self.decodeTable))
    @staticmethod
    def from_data(data, table_log=None):
        """Build a coder from raw data by counting unique symbol occurrences;
        table_log defaults to a size with headroom over the alphabet size."""
        v, c = np.unique(data, return_counts=True)
        symbol_occurrences = dict([(v_, c_) for v_, c_ in zip(v, c)])
        if table_log is None:
            table_log = max(5, 3 + int(np.ceil(np.log2(len(c)))))
        return TabledANS(symbol_occurrences, table_log)
    def encode_efficient(self, symbol, state, symbol_tt):
        """Encode one symbol; returns (new_state, emitted_bits_string)."""
        symbol_tt = symbol_tt[symbol]
        nb_bits_out = (state + symbol_tt['deltaNbBits']) >> 16
        eff = output_nb_bits(state, nb_bits_out)
        state = self.coding_table[(state >> nb_bits_out) + symbol_tt['deltaFindState']]
        return state, eff
    def encode_efficient_data(self, inpu):
        """Encode a symbol sequence; collects chunks and joins once at the end."""
        eff_list = []
        # Warm-up step: derive the initial state from the first symbol.
        state, eff = self.encode_efficient(inpu[0], 0, self.symbol_tt)
        #eff_list.append(eff)
        for char in inpu:
            state, eff = self.encode_efficient(char, state, self.symbol_tt)
            eff_list.append(eff)
        eff = output_nb_bits(state - self.table_size, self.table_log) #Includes Current Bit
        eff_list.append(eff)
        bit_stream = ''.join(eff_list)
        return bit_stream
    def encode_symbol(self, symbol, state, bit_stream, symbol_tt):
        """Encode a Symbol Using tANS, giving the current state, the symbol, and the bitstream and STT"""
        symbol_tt = symbol_tt[symbol]
        nb_bits_out = (state + symbol_tt['deltaNbBits']) >> 16
        bit_stream += output_nb_bits(state, nb_bits_out)
        state = self.coding_table[(state >> nb_bits_out) + symbol_tt['deltaFindState']]
        return state, bit_stream
    def encode_data(self, inpu):
        """ Functions to Encode and Decode Streams of Data. """
        # Warm-up step: the first call only seeds the state; its bits are reset.
        state, bit_stream = self.encode_symbol(inpu[0], 0, "", self.symbol_tt)
        bit_stream = ""
        for char in inpu:
            state, bit_stream = self.encode_symbol(char, state, bit_stream, self.symbol_tt)
        bit_stream += output_nb_bits(state - self.table_size, self.table_log) #Includes Current Bit
        return bit_stream
    def decode_data(self, bit_stream):
        """Decode a bit stream back into the (reversed-then-restored) symbols."""
        output = []
        # The stream tail holds the final encoder state.
        state, bit_stream = bits_to_state(bit_stream, self.table_log)
        end = len(bit_stream)
        while end > 0:
            symbol, state, end = efficient_decode_symbol(bit_stream, end, state, self.decode_table)
            output.append(symbol)
        # cover a corner case when last symbols encoded with zero bits
        while self.decode_table[state]['nbBits'] == 0 and self.decode_table[state]['newX'] != 0:
            symbol = self.decode_table[state]['symbol']
            state = self.decode_table[state]['newX']
            if self.decode_table[state]['nbBits'] != 0:
                break
            output.append(symbol)
        # tANS decodes in reverse order of encoding.
        output.reverse()
        return output
    @property
    def total_tables_size(self):
        """Approximate storage cost of the coder's tables (in table entries)."""
        return len(self.coding_table) + 3 * len(self.decode_table) + 2 * len(self.symbol_tt)
null | RIQ-main/utils/dataset.py | """prepare calibration dataset for quantization"""
import numpy as np
import torch as pt
from transformers import DistilBertTokenizer
from transformers.data.processors.squad import SquadV1Processor
from utils.onnx_bridge import OnnxBridge
from utils.image_dir import ImageDir
def prepare_dataset_images(dataset_path, path_ob):
    """Build the calibration inputs for an ONNX model.

    dataset_path -- glob pattern for calibration images
    path_ob      -- path to the ONNX model (used to read input shapes)
    Returns a list of model inputs; falls back to random data when no
    images match the pattern.
    """
    ob = OnnxBridge(path_ob)
    dims = ob.get_inputs()
    image_dims = tuple(dims[0])
    images = ImageDir(dataset_path, image_dims)
    _images = []
    if dims[1:] != []:
        # Multi-input model: pair each image with random tensors for the
        # remaining inputs (seeded for reproducibility).
        pt.manual_seed(20)
        for im_idx in range(len(images)):
            rand_inp = [images[im_idx]]
            for el in dims[1:]:
                rand_inp.append(10 * pt.randn(tuple(el)).numpy())
            _images.append(rand_inp)
        images = _images  # [0]
    if len(images) != 0:
        print("Using Calibration set with", len(images), "sample images")
    else:
        # No calibration images found: generate 3 random input samples.
        print("Using Calibration set with randomly generated input")
        images = []
        for i in range(3):
            images.append(np.random.normal(size=tuple(dims[0])).astype(np.float32))
    return images
def calibration_equal_size(size_text, size_input):
    """Return True when every sampled text length equals the model input size."""
    return all(length == size_input for length in size_text)
def prepare_dataset_text(dataset_path, size_input=384):
    """Choose 3 samples from the validation dataset for calibration
    best performance is if we choose text which is similar in length to the model's input size"""
    processor = SquadV1Processor()
    tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased-distilled-squad')
    examples = processor.get_dev_examples(dataset_path, filename="dev-v1.1.json")
    qid_to_example_index = {example.qas_id: i for i, example in enumerate(examples)}
    qid_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
    answer_qids = [qas_id for qas_id, has_answer in qid_to_has_answer.items() if has_answer]
    # Track the 3 chosen samples and their token lengths.
    size_text = np.array([0, 0, 0])
    text = np.array([None, None, None])
    if len(answer_qids) >= 3:
        # Seed the pool with the first 3 answerable examples.
        for i in range(3):
            question = examples[qid_to_example_index[answer_qids[i]]].question_text
            context = examples[qid_to_example_index[answer_qids[i]]].context_text
            inputs = tokenizer.encode_plus(question, context, return_tensors='pt')
            size = dict(inputs)['input_ids'][0].size()[0]
            text[i] = inputs
            size_text[i] = size
        # Greedily replace the worst-matching sample with any closer one.
        for i in range(len(answer_qids)):
            if calibration_equal_size(size_text, size_input):
                # All 3 samples match the model input length exactly; stop early.
                return text
            question = examples[qid_to_example_index[answer_qids[i]]].question_text
            context = examples[qid_to_example_index[answer_qids[i]]].context_text
            inputs = tokenizer.encode_plus(question, context, return_tensors='pt')
            size = dict(inputs)['input_ids'][0].size()[0]
            if abs(size - size_input) < np.max(abs(size_text - size_input)):
                # Replace the sample whose length is farthest from size_input.
                j = np.argmax(abs(size_text - size))
                # Truncate or zero-pad to the model's static sequence length.
                if size > size_input:
                    input_ids = dict(inputs)['input_ids'][0, :size_input]
                    attention_mask = dict(inputs)['attention_mask'][0, : size_input]
                else:
                    input_ids = pt.cat([dict(inputs)['input_ids'][0],
                                        pt.zeros(size_input - size, dtype=pt.int64)])
                    attention_mask = pt.cat([dict(inputs)['attention_mask'][0],
                                             pt.zeros(size_input - size, dtype=pt.int64)])
                inputs = [np.array(pt.reshape(input_ids, (1, size_input), )),
                          np.array(pt.reshape(attention_mask, (1, size_input)))]
                text[j] = inputs
                size_text[j] = size
        print("Created a Calibration set with", len(text), "text samples of lengths", size_text)
    else:
        # Too few real examples: fall back to random token sequences.
        text = []
        for i in range(3):
            input_ids = np.random.randint(20000, size=(1, size_input), dtype=np.int64)
            attention_mask = np.ones((1, size_input), dtype=np.int64)
            text.append((input_ids, attention_mask))
    return text
| 4,287 | 43.206186 | 97 | py |
null | RIQ-main/utils/download_BERT.py | """ download BERT model from Neural Magic Sparse Zoo"""
import os
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
from sparsezoo import Model

# DistilBERT fine-tuned on SQuAD, fetched from Neural Magic's SparseZoo.
stub = "zoo:nlp/question_answering/distilbert-none/pytorch/huggingface/squad/base-none"
download_path = 'models/downloads/sparsezoo_bert'
model = Model(stub, download_path=download_path)
# Fix: the mv source was misspelled 'sparszaoo_bert' and never matched
# download_path, so models/BERT.onnx was never created.
os.system('mv models/downloads/sparsezoo_bert/model.onnx models/BERT.onnx')
# Fix: '-r' is required to remove the non-empty downloads tree
# ('rm -df' only deletes files and *empty* directories; cf. download_YOLO.py).
os.system('rm -dfr models/downloads')
| 471 | 38.333333 | 87 | py |
null | RIQ-main/utils/download_VGG.py | """ download VGG model"""
import torch
from torchvision.models import vgg16, VGG16_Weights
# Pretrained ImageNet VGG-16, switched to inference mode before export.
model = vgg16(weights=VGG16_Weights.IMAGENET1K_V1)
model.eval()
# Dummy input fixes the exported graph to a static 1x3x224x224 shape.
inp = torch.randn(1, 3, 224, 224)
in_names = ["actual_input"]
out_name = ["output"]
torch.onnx.export(model, inp, "models/VGG.onnx", verbose=False, input_names=in_names, output_names=out_name,
                  export_params=True)
| 387 | 31.333333 | 108 | py |
null | RIQ-main/utils/download_ViT.py | """download ViT model"""
import torch
from torchvision.models import vit_b_16, ViT_B_16_Weights
# Pretrained ImageNet ViT-B/16, switched to inference mode before export.
model = vit_b_16(weights=ViT_B_16_Weights.IMAGENET1K_V1)
model.eval()
# Dummy input fixes the exported graph to a static 1x3x224x224 shape.
inp = torch.randn(1, 3, 224, 224)
in_names = ["actual_input"]
out_name = ["output"]
torch.onnx.export(model, inp, "models/ViT.onnx", verbose=False, input_names=in_names,
                  output_names=out_name, export_params=True)
| 398 | 32.25 | 85 | py |
null | RIQ-main/utils/download_YOLO.py | """download yolo model from sparsezoo"""
import os
from sparsezoo import Model
# YOLOv5-s trained on COCO, fetched from Neural Magic's SparseZoo.
stub = "zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/base-none"
download_path = 'models/downloads/sparsezoo_yolo'
model = Model(stub, download_path=download_path)
# Move the downloaded ONNX into place and clean up the download tree.
os.system('mv models/downloads/sparsezoo_yolo/model.onnx models/YOLO.onnx')
os.system('rm -dfr models/downloads')
| 364 | 32.181818 | 75 | py |
null | RIQ-main/utils/download_alexnet.py | """download alexnet model"""
from torchvision.models import alexnet, AlexNet_Weights
import torch
# Pretrained ImageNet AlexNet, switched to inference mode before export.
model = alexnet(weights=AlexNet_Weights.IMAGENET1K_V1)
model.eval()
# Dummy input fixes the exported graph to a static 1x3x224x224 shape.
inputs = torch.randn(1, 3, 224, 224)
in_names = ["actual_input"]
out_name = ["output"]
torch.onnx.export(model, inputs, "models/alexnet.onnx", verbose=False, input_names=in_names,
                  output_names=out_name, export_params=True)
| 408 | 33.083333 | 92 | py |
null | RIQ-main/utils/download_resnet.py | """download resnet model"""
import torch
from torchvision.models import resnet50, ResNet50_Weights
# Pretrained ImageNet ResNet-50, switched to inference mode before export.
model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
model.eval()
# Dummy input fixes the exported graph to a static 1x3x224x224 shape.
inp = torch.randn(1, 3, 224, 224)
in_names = ["actual_input"]
out_name = ["output"]
torch.onnx.export(model, inp, "models/resnet.onnx", verbose=False, input_names=in_names,
                  output_names=out_name, export_params=True)
| 404 | 32.75 | 88 | py |
null | RIQ-main/utils/image_dir.py | """prepare images for calibration dataset"""
import glob
import cv2
import numpy as np
class ImageDir:
    """Loads every image matching a glob pattern and preprocesses each one
    into the network input layout described by dims (N, C, H, W)."""
    def __init__(self, dataset_calibration_path, dims=(1, 3, 512, 512)):
        self.dataset_calibration_path = dataset_calibration_path
        self.dims = dims
        self.additional_input_dims = dims[1:]
        self.fns = glob.glob(self.dataset_calibration_path, recursive=True)
        self.originals = [cv2.imread(fn) for fn in self.fns]
        def process(i):
            # Resize to the model's spatial size and scale pixels to [-1, 1].
            i = cv2.resize(i, (self.dims[2], self.dims[2]))
            i = (np.float32(i)/255-0.5)*2
            if self.dims[1] == 3:
                # HWC -> NCHW for 3-channel models.
                i = i.transpose(2, 0, 1).reshape(1, 3, i.shape[1], i.shape[0])
            else:
                i = i.reshape(1, i.shape[0], i.shape[1], i.shape[2])
            return i
        self.processed = [process(o) for o in self.originals]
    def __len__(self):
        return len(self.originals)
    def __getitem__(self, ix):
        if self.dims[1] == 3 or self.dims[-1] == 3:
            return self.processed[ix].copy()
        elif self.dims[1] == 6:
            # Fix: 'hstack' was an undefined bare name (NameError at runtime);
            # np.hstack concatenates the two NCHW tensors along the channel axis.
            return np.hstack((self.processed[ix], self.processed[(ix+0) % len(self.processed)]))
        else:
            raise ValueError("ImageDir: model requires image dim that is not supported.")
null | RIQ-main/utils/onnx_bridge.py | """onnx bridge"""
import numpy as np
import onnx
import onnx.numpy_helper as nh
import onnxruntime
import torch
class OnnxBridge:
    """Thin wrapper around an ONNX model: load, run, save, and read/write
    the graph's initializer weights."""
    def __init__(self, fn):
        self.m = onnx.load(fn)
        # Prefer GPU inference when CUDA is available.
        cuda = torch.cuda.is_available()
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] \
            if cuda else ['CPUExecutionProvider']
        self.sess = onnxruntime.InferenceSession(fn, providers=providers)
    def save(self, fn):
        # Persist the (possibly modified) in-memory graph to disk.
        onnx.save(self.m, fn)
    def __call__(self, x):
        # Lazily rebuild the session after set_weights() invalidated it.
        if not self.sess:
            cuda = torch.cuda.is_available()
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] \
                if cuda else ['CPUExecutionProvider']
            self.sess = onnxruntime.InferenceSession(self.m.SerializeToString(),
                                                     providers=providers)
        names = [i.name for i in self.sess.get_inputs()]
        # Accept either a single array or a list/tuple of arrays.
        if not type(x) in (list, tuple):
            x = [x]
        return self.sess.run(None, {name:v for name, v in zip(names, x)})
    def get_weights(self):
        # Keys are (initializer_index, tensor_name); values are numpy copies.
        return {(i, w.name): nh.to_array(w).copy() for i, w in enumerate(self.m.graph.initializer)}
    def set_weights(self, weights):
        assert len(weights) == len(self.m.graph.initializer), 'incorrect number of weights!'
        for k, w in weights.items():
            i, name = k
            # Only overwrite float32 tensors (data_type 1); others are untouched.
            if self.m.graph.initializer[i].data_type == 1:
                self.m.graph.initializer[i].CopyFrom(nh.from_array(np.float32(w), name=name))
        # Drop the session so the next call rebuilds it from the new weights.
        self.sess = None
    def get_inputs(self):
        """Return the declared shape of every graph input as a list of lists."""
        inputs = []
        for inp in self.m.graph.input:
            tensor_type = inp.type.tensor_type
            if tensor_type.HasField("shape"):
                # iterate through dimensions of the shape:
                t_inp = []
                for d in tensor_type.shape.dim:
                    t_inp.append(d.dim_value)
                inputs.append(t_inp)
            else:
                raise ValueError("Inputs shape not found")
        return inputs
| 2,083 | 34.931034 | 99 | py |
null | RIQ-main/utils/presets.py | """presets"""
import torch
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class Chann3:
    """chan3"""
    def __init__(self):
        # Grayscale(3) replicates a single channel across 3 output channels.
        self.transform = transforms.Grayscale(3)
    def __call__(self, x):
        # Convert only single-channel PIL images; pass multi-channel through.
        if len(x.mode) == 1:
            return self.transform(x)
        return x
class ClassificationPresetTrain:
    """Training-time image pipeline: random crop/flip, optional auto-augment
    policy, tensor conversion, normalization, optional random erasing."""
    def __init__(
        self,
        crop_size,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
        interpolation=InterpolationMode.BILINEAR,
        hflip_prob=0.5,
        auto_augment_policy=None,
        random_erase_prob=0.0,
    ):
        trans = [transforms.RandomResizedCrop(crop_size, interpolation=interpolation)]
        if hflip_prob > 0:
            trans.append(transforms.RandomHorizontalFlip(hflip_prob))
        # Select the augmentation family by name; anything else is treated
        # as a torchvision AutoAugmentPolicy identifier.
        if auto_augment_policy is not None:
            if auto_augment_policy == "ra":
                trans.append(autoaugment.RandAugment(interpolation=interpolation))
            elif auto_augment_policy == "ta_wide":
                trans.append(autoaugment.TrivialAugmentWide(interpolation=interpolation))
            elif auto_augment_policy == "augmix":
                trans.append(autoaugment.AugMix(interpolation=interpolation))
            else:
                aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
                trans.append(autoaugment.AutoAugment(policy=aa_policy, interpolation=interpolation))
        trans.extend(
            [
                transforms.PILToTensor(),
                transforms.ConvertImageDtype(torch.float),
                transforms.Normalize(mean=mean, std=std),
            ]
        )
        if random_erase_prob > 0:
            trans.append(transforms.RandomErasing(p=random_erase_prob))
        self.transforms = transforms.Compose(trans)
    def __call__(self, img):
        return self.transforms(img)
class ClassificationPresetEval:
    """class ClassificationPresetEval"""
    # Eval-time pipeline: resize, center-crop, force 3 channels,
    # convert to float tensor, then normalize with ImageNet statistics.
    def __init__(
        self,
        crop_size,
        resize_size=256,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
        interpolation=InterpolationMode.BILINEAR,
    ):
        self.transforms = transforms.Compose(
            [
                transforms.Resize(resize_size, interpolation=interpolation),
                transforms.CenterCrop(crop_size),
                Chann3(),
                transforms.PILToTensor(),
                transforms.ConvertImageDtype(torch.float),
                transforms.Normalize(mean=mean, std=std),
            ]
        )
    def __call__(self, img):
        return self.transforms(img)
| 2,729 | 34 | 100 | py |
null | RIQ-main/utils/quantize.py | """quantizer"""
from math import sqrt
import numpy as np
from utils.onnx_bridge import OnnxBridge
from utils import ans
import pickle
sanity = False
verbose = False
minsize = 512
min_out_channel_size = 1
def compressed_bits(qw):
    """Entropy-limit estimate of the compressed size of qw, in bits."""
    _, counts = np.unique(qw, return_counts=True)
    probs = counts / sum(counts)
    # Shannon bound for the payload plus 32 bits of table overhead per symbol.
    payload = sum(-np.log2(probs) * counts)
    return payload + (16 + 16) * len(counts)
def measure_cos_err(p, t):
    """Cosine similarity of two arrays with identical shapes."""
    assert p.shape == t.shape, 'shapes are not the same!'
    # Unit-normalize both sides, then dot them.
    return (p / np.linalg.norm(p)) @ (t / np.linalg.norm(t))
def measure_cos_sim(p, t, _):
    """measure cos distance"""
    # Flatten the first output tensor of each model and compare.
    lhs = p[0].flatten()[:]
    rhs = t[0].flatten()[:]
    return measure_cos_err(lhs, rhs)
def bitstring_to_bytes(s):
    """Pack a bit string into bytes; a sentinel '1' preserves leading zeros."""
    marked = '1' + s
    n_bytes = (len(marked) + 7) // 8
    return int(marked, 2).to_bytes(n_bytes, byteorder='big')
def bytes_to_bitstring(bs):
    """Inverse of bitstring_to_bytes: drop the '0b' prefix and sentinel bit."""
    as_bits = bin(int.from_bytes(bs, byteorder='big'))
    return as_bits.lstrip('0b')[1:]
def calculate_compressed_size(qwn, emulate_compression=False, save_to=None):
"""calculate_compressed_size"""
min_out_channel_size = 1
compressed_size = 0
riq_dict = {}
for k, qwt in qwn.items():
qw, delta = qwt
shape = qw.shape
if qw.size > minsize and len(shape) > 1 and shape[0] > min_out_channel_size:
qw = qw.flatten().astype(np.int32)
# encoding is expensive.
# don't do encoding in each step, entropy limit approximation is sufficient
if emulate_compression:
compressed_size += compressed_bits(qw)
else:
tans = ans.TabledANS.from_data(qw)
bit_stream = tans.encode_efficient_data(qw)
if sanity:
print("Sanity check for layer ", k)
arr = np.array(tans.decode_data(bit_stream))
assert np.array_equal(qw, arr)
compressed_size += len(bit_stream) + tans.total_tables_size
if save_to != None:
riq_dict[k] = (tans, bitstring_to_bytes(bit_stream), delta, shape)
else:
compressed_size += 32 * qw.size
riq_dict[k] = (None, qw * delta, 1.0, shape)
if save_to != None:
with open(save_to, 'wb') as fn:
pickle.dump(riq_dict, fn)
return compressed_size
def get_quantized_model(
model_fn,
calibration_dataset,
eps=0.01,
distortion=0.005,
skip_first=False,
compare_function=measure_cos_sim,
save_to=None):
"""should return quantized model
do quantization and print quantization logging"""
err_thr = 1.0 - distortion
ob = OnnxBridge(model_fn)
min_out_channel_size = 1
original_outs = [ob(i) for i in calibration_dataset]
ws = ob.get_weights()
qws = {}
qwn = {}
max_dim = 0
prod_norm = 1.0
model_numel = 0
for k, w in ws.items():
if w.size > minsize and len(w.shape) > 1 and w.shape[0] > min_out_channel_size:
model_numel += w.size
prod_norm *= sqrt((w ** 2).sum())
if w.size > max_dim:
max_dim = w.size
upper_bound = sqrt(max_dim / 24.0) / (sqrt(eps) * eps) # (sqrt(eps)*eps)
lower_bound = sqrt(max_dim / 24.0) / (1 - eps) # (1-eps)
step = sqrt(upper_bound - lower_bound)
print("searching for k in the range [", lower_bound, ",", upper_bound, "] with steps of: ", step)
idx = 0
k_const = lower_bound
while (step > 3 and k_const <= upper_bound):
skip_f = skip_first
w_size = 0
compressed_size = 0
stat = []
epsilons = []
##################################################
for k, w in ws.items():
w_size += 32 * w.size
if skip_f:
qws[k] = w.copy()
qwn[k] = (w.copy(), 1.0)
skip_f = False
continue
if w.size > minsize and len(w.shape) > 1 and w.shape[0] > min_out_channel_size:
delta = np.linalg.norm(w) / k_const + np.linalg.norm(w) * eps \
* sqrt(24 / w.size)
epsilons += [(eps + sqrt(w.size / 24) / k_const) ** 2]
stat += [np.ceil((w.max() - w.min()) / delta)]
qw = np.round(w / delta).flatten().astype(np.int32)
qws[k] = qw.reshape(w.shape) * delta
qwn[k] = (qw.reshape(w.shape), delta)
else:
qws[k] = w.copy()
qwn[k] = (w.copy(), 1.0)
##################################################################
ob.set_weights(qws)
outs = [ob(i) for i in calibration_dataset]
mean_err = 0
for orig_output, quant_output, inputs in zip(original_outs, outs, calibration_dataset):
mean_err += compare_function(orig_output, quant_output, inputs)
mean_err /= len(outs)
if mean_err > err_thr:
print("k = ", end="")
print('%0.2f' % k_const, end="")
print(" complies with the distortion constraint", distortion, end="")
print(". Approximated CR: ", w_size / calculate_compressed_size(qwn, True))
if verbose:
print("err: ", 1 - mean_err)
print("step: ", step)
print("number of bins:")
print(stat)
print("mean rate:")
print(sum([np.log2(s) for s in stat]) / len(stat))
print("epsilons:")
print(epsilons)
print("mean epsilon:")
print(sum(epsilons) / len(epsilons))
print("mean_err:/out_err")
print((sum(epsilons) / len(epsilons)) / (1 - mean_err))
upper_bound = k_const
step = sqrt(step)
lower_bound = max(lower_bound, k_const - step * np.floor(step))
k_const = lower_bound
idx = 0
if step > 3:
print("searching for k in the range [", lower_bound, ",", upper_bound, "] with steps of: ", step)
else:
idx += 1
k_const = lower_bound + idx * step
# For quantize model that fits constraint, do encoding to measure actual compression rate
print("Start compressing with ANS encoder..." )
print("ANS achieved CR: ", w_size / calculate_compressed_size(qwn, False, save_to))
return ob
def get_quantized_model_by_const(model_fn, const):
"""get quantized model by const"""
ob = OnnxBridge(model_fn)
min_out_channel_size = 1
ws = ob.get_weights()
qws = {}
qwn = {}
eps = 0.01
n_const = const
w_size = 0
minsize = 10
##################################################
for k, w in ws.items():
w_size += 32 * w.size
if w.size > minsize and len(w.shape) > 1 and w.shape[0] > min_out_channel_size:
delta = np.linalg.norm(w) / n_const + np.linalg.norm(w) * eps * sqrt(24 / w.size)
qw = np.round(w / delta).flatten().astype(np.int32)
qws[k] = qw.reshape(w.shape) * delta
qwn[k] = qw.reshape(w.shape)
else:
qws[k] = w.copy()
qwn[k] = w.copy()
ob.set_weights(qws)
print("ANS achieved CR: ", w_size / calculate_compressed_size(qwn, False))
return ob
| 7,269 | 34.291262 | 113 | py |
null | RIQ-main/utils/quantize_bert.py | """quantize bert model"""
import sys
import os
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
import onnx
import numpy as np
from onnx_bridge import OnnxBridge
from quantize import get_quantized_model
from dataset import prepare_dataset_text
def measure_cos_err(lhs, rhs):
"""measure cos error"""
assert lhs.shape == rhs.shape, 'shapes are not the same!'
return lhs / np.linalg.norm(lhs) @ rhs / np.linalg.norm(rhs)
def bert_compare_function(orig_output, quant_output, inputs):
"""compare function for bert"""
size = np.sum(inputs[1])
return measure_cos_err(orig_output[0].flatten()[:size], quant_output[0].flatten()[:size])
def main():
""" main """
base_onnx = sys.argv[1]
new_model_path = os.path.join(sys.argv[2])
distortion = float(sys.argv[3])
dataset_path = sys.argv[4]
model = OnnxBridge(base_onnx)
dims = model.get_inputs()
size_input = dims[1][1]
dataset_calibration = prepare_dataset_text(dataset_path, size_input)
quant_model = get_quantized_model(
base_onnx,
dataset_calibration,
distortion=distortion,
compare_function=bert_compare_function)
onnx.save(quant_model.m, new_model_path)
main()
| 1,239 | 28.52381 | 93 | py |
null | RIQ-main/utils/quantize_yolo.py | """quantize yolo model"""
import sys
import os
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
import onnx
import numpy as np
from quantize import get_quantized_model, get_quantized_model_by_const
from dataset import prepare_dataset_images
def measure_cos_err(lhs, rhs):
"""measure cos error"""
assert lhs.shape == rhs.shape, 'shapes are not the same!'
return lhs / np.linalg.norm(lhs) @ rhs / np.linalg.norm(rhs)
def compare_yolo_output(orig_output, quant_output, _):
"""compare function for yolo"""
return measure_cos_err(orig_output[0].flatten()[-102000:], quant_output[0].flatten()[-102000:])
def main():
""" main """
base_onnx = sys.argv[1]
new_model_path = os.path.join(sys.argv[2])
if len(sys.argv) > 4:
distortion = float(sys.argv[3])
dataset_calibration_path = sys.argv[4] + '/*.jpg'
dataset_calibration = prepare_dataset_images(dataset_calibration_path, base_onnx)
quant_model = get_quantized_model(base_onnx, dataset_calibration, distortion=distortion,
compare_function=compare_yolo_output)
else:
constant = float(sys.argv[3])
quant_model = get_quantized_model_by_const(base_onnx, constant)
onnx.save(quant_model.m, new_model_path)
main()
| 1,317 | 35.611111 | 99 | py |
null | OpenOOD-main/.pre-commit-config.yaml | exclude: ^tests/data/
repos:
- repo: https://github.com/PyCQA/flake8.git
rev: 3.8.3
hooks:
- id: flake8
- repo: https://github.com/pre-commit/mirrors-yapf
rev: v0.30.0
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.1.0
hooks:
- id: trailing-whitespace
- id: check-yaml
- id: end-of-file-fixer
- id: double-quote-string-fixer
- id: check-merge-conflict
- id: fix-encoding-pragma
args: ["--remove"]
- id: mixed-line-ending
args: ["--fix=lf"]
- repo: https://github.com/codespell-project/codespell
rev: v2.1.0
hooks:
- id: codespell
args: ["--ignore-words=codespell_ignored.txt"]
- repo: https://github.com/myint/docformatter
rev: v1.3.1
hooks:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
| 893 | 26.090909 | 57 | yaml |
null | OpenOOD-main/CODE_OF_CONDUCT.md | # Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
yangjingkang001@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| 5,227 | 39.527132 | 80 | md |
null | OpenOOD-main/CONTRIBUTING.md | ## Contributing to OpenOOD
All kinds of contributions are welcome, including but not limited to the following.
- Integrate more methods under generalized OOD detection
- Fix typo or bugs
- Add new features and components
### Workflow
1. fork and pull the latest OpenOOD repository
2. checkout a new branch (do not use master branch for PRs)
3. commit your changes
4. create a PR
```{note}
If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first.
```
### Code style
#### Python
We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
We use the following tools for linting and formatting:
- [flake8](http://flake8.pycqa.org/en/latest/): A wrapper around some linter tools.
- [yapf](https://github.com/google/yapf): A formatter for Python files.
- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports.
- [markdownlint](https://github.com/markdownlint/markdownlint): A linter to check markdown files and flag style issues.
- [docformatter](https://github.com/myint/docformatter): A formatter to format docstring.
Style configurations of yapf and isort can be found in [setup.cfg](./setup.cfg).
We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`,
fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, `mixed-line-ending`, sorts `requirments.txt` automatically on every commit.
The config for a pre-commit hook is stored in [.pre-commit-config](./.pre-commit-config.yaml).
After you clone the repository, you will need to install initialize pre-commit hook.
```shell
pip install -U pre-commit
```
From the repository folder
```shell
pre-commit install
```
## Contributing to OpenOOD leaderboard
We welcome new entries submitted to the leaderboard. Please follow the instructions below to submit your results.
1. Evaluate your model/method with OpenOOD's benchmark and evaluator such that the comparison is fair.
2. Report your new results by opening an issue. Remember to specify the following information:
- **`Training`**: The training method of your model, e.g., `CrossEntropy`.
- **`Postprocessor`**: The postprocessor of your model, e.g., `MSP`, `ReAct`, etc.
- **`Near-OOD AUROC`**: The AUROC score of your model on the near-OOD split.
- **`Far-OOD AUROC`**: The AUROC score of your model on the far-OOD split.
- **`ID Accuracy`**: The accuracy of your model on the ID test data.
- **`Outlier Data`**: Whether your model uses the outlier data for training.
- **`Model Arch.`**: The architecture of your base classifier, e.g., `ResNet18`.
- **`Additional Description`**: Any additional description of your model, e.g., `100 epochs`, `torchvision pretrained`, etc.
3. Ideally, send us a copy of your model checkpoint so that we can verify your results on our end. You can either upload the checkpoint to a cloud storage and share the link in the issue, or send us an email at [jz288@duke.edu](mailto:jz288@duke.edu).
| 3,077 | 43.608696 | 251 | md |
null | OpenOOD-main/README.md | # OpenOOD: Benchmarking Generalized OOD Detection
[-b31b1b?style=for-the-badge)](https://openreview.net/pdf?id=gT6j4_tskUt)
[-yellowgreen?style=for-the-badge)](https://arxiv.org/abs/2306.09301)
[](https://zjysteven.github.io/OpenOOD/)
[](https://colab.research.google.com/drive/1tvTpCM1_ju82Yygu40fy7Lc0L1YrlkQF?usp=sharing)
[](https://openood.slack.com/)
<img src="https://live.staticflickr.com/65535/52145428300_78fd595193_k.jpg" width="800">
This repository reproduces representative methods within the [`Generalized Out-of-Distribution Detection Framework`](https://arxiv.org/abs/2110.11334),
aiming to make a fair comparison across methods that were initially developed for anomaly detection, novelty detection, open set recognition, and out-of-distribution detection.
This codebase is still under construction.
Comments, issues, contributions, and collaborations are all welcomed!
|  |
|:--:|
| <b>Timeline of the methods that OpenOOD supports. More methods are included as OpenOOD iterates.</b>|
## Updates
- **16 June, 2023**: :boom::boom: We are releasing OpenOOD `v1.5`, which includes the following exciting updates. A detailed changelog is provided in the [Wiki](https://github.com/Jingkang50/OpenOOD/wiki/OpenOOD-v1.5-change-log).
- A new [report](https://arxiv.org/abs/2306.09301) which provides benchmarking results on ImageNet and for full-spectrum detection.
- A unified, easy-to-use evaluator that allows evaluation by simply creating an evaluator instance and calling its functions. Check out this [colab tutorial](https://colab.research.google.com/drive/1tvTpCM1_ju82Yygu40fy7Lc0L1YrlkQF?usp=sharing)!
- A live [leaderboard](https://zjysteven.github.io/OpenOOD/) that tracks the state-of-the-art of this field.
- **14 October, 2022**: OpenOOD `v1.0` is accepted to NeurIPS 2022. Check the report [here](https://arxiv.org/abs/2210.07242).
- **14 June, 2022**: We release `v0.5`.
- **12 April, 2022**: Primary release to support [Full-Spectrum OOD Detection](https://arxiv.org/abs/2204.05306).
## FAQ
- `APS_mode` means Automatic (hyper)Parameter Searching mode, which enables the model to validate all the hyperparameters in the sweep list based on the validation ID/OOD set. The default value is False. Check [here](https://github.com/Jingkang50/OpenOOD/blob/main/configs/postprocessors/dice.yml) for example.
## Get Started
### v1.5 (up-to-date)
#### Installation
OpenOOD now supports installation via pip.
```
pip install git+https://github.com/Jingkang50/OpenOOD
```
#### Data
If you only use our evaluator, the benchmarks for evaluation will be automatically downloaded by the evaluator (again check out this [tutorial](https://colab.research.google.com/drive/1tvTpCM1_ju82Yygu40fy7Lc0L1YrlkQF?usp=sharing)). If you would like to also use OpenOOD for training, you can get all data with our [downloading script](https://github.com/Jingkang50/OpenOOD/tree/main/scripts/download). Note that ImageNet-1K training images should be downloaded from its official website.
#### Pre-trained checkpoints
OpenOOD v1.5 focuses on 4 ID datasets, and we release pre-trained models accordingly.
- CIFAR-10 [[Google Drive]](https://drive.google.com/file/d/1byGeYxM_PlLjT72wZsMQvP6popJeWBgt/view?usp=drive_link): ResNet-18 classifiers trained with cross-entropy loss from 3 training runs.
- CIFAR-100 [[Google Drive]](https://drive.google.com/file/d/1s-1oNrRtmA0pGefxXJOUVRYpaoAML0C-/view?usp=drive_link): ResNet-18 classifiers trained with cross-entropy loss from 3 training runs.
- ImageNet-200 [[Google Drive]](https://drive.google.com/file/d/1ddVmwc8zmzSjdLUO84EuV4Gz1c7vhIAs/view?usp=drive_link): ResNet-18 classifiers trained with cross-entropy loss from 3 training runs.
- ImageNet-1K [[Google Drive]](https://drive.google.com/file/d/15PdDMNRfnJ7f2oxW6lI-Ge4QJJH3Z0Fy/view?usp=drive_link): ResNet-50 classifiers including 1) the one from torchvision, 2) the ones that are trained by us with specific methods such as MOS, CIDER, and 3) the official checkpoints of data augmentation methods such as AugMix, PixMix.
Again, these checkpoints can be downloaded with the downloading script [here](https://github.com/Jingkang50/OpenOOD/tree/main/scripts/download).
### v1.0 (no longer maintained)
To setup the environment, we use `conda` to manage our dependencies.
Our developers use `CUDA 10.1` to do experiments.
You can specify the appropriate `cudatoolkit` version to install on your machine in the `environment.yml` file, and then run the following to create the `conda` environment:
```bash
conda env create -f environment.yml
conda activate openood
pip install libmr==0.1.9 # if necessary
```
Datasets and pretrained models are provided [here](https://entuedu-my.sharepoint.com/:f:/g/personal/jingkang001_e_ntu_edu_sg/Eso7IDKUKQ9AoY7hm9IU2gIBMWNnWGCYPwClpH0TASRLmg?e=kMrkVQ).
Please unzip the files if necessary.
We also provide an automatic data download script [here](https://github.com/Jingkang50/OpenOOD/blob/main/scripts/download/).
Our codebase accesses the datasets from `./data/` and pretrained models from `./results/checkpoints/` by default.
```
├── ...
├── data
│ ├── benchmark_imglist
│ ├── images_classic
│ ├── images_medical
│ └── images_largescale
├── openood
├── results
│ ├── checkpoints
│ └── ...
├── scripts
├── main.py
├── ...
```
| OOD Benchmark | MNIST | CIFAR-10 | CIFAR-100 | ImageNet-1K |
| ------------- | ----------- | -------------- | ------------ | ----------- |
| Accuracy | 98.50 | 95.24 | 77.10 | 76.17 |
| Checkpoint | [link]() | [link]() | [link]() | [link]() |
| OSR Benchmark | MNIST-6 | CIFAR-6 | CIFAR-50 | TIN-20 |
| ------------- | ----------- | -------------- | ------------ | ----------- |
| Checkpoint | [links]() | [links]() | [links]() | [links]() |
The easiest hands-on script is to train LeNet-5 on MNIST and evaluate its OOD or FS-OOD performance with MSP baseline.
```bash
sh scripts/basics/mnist/train_mnist.sh
sh scripts/ood/msp/mnist_test_ood_msp.sh
```
---
## Supported Benchmarks (10)
This part lists all the benchmarks we support. Feel free to include more.
<img src="https://live.staticflickr.com/65535/52146310895_7458dd8cbc_k.jpg" width="800">
<details open>
<summary><b>Anomaly Detection (1)</b></summary>
> - [x] [MVTec-AD](https://www.mvtec.com/company/research/datasets/mvtec-ad)
</details>
<details open>
<summary><b>Open Set Recognition (4)</b></summary>
> - [x] [MNIST-4/6]()
> - [x] [CIFAR-4/6]()
> - [x] [CIFAR-40/60]()
> - [x] [TinyImageNet-20/180]()
</details>
<details open>
<summary><b>Out-of-Distribution Detection (6)</b></summary>
> - [x] [BIMCV (A COVID X-Ray Dataset)]()
> > Near-OOD: `CT-SCAN`, `X-Ray-Bone`;<br>
> > Far-OOD: `MNIST`, `CIFAR-10`, `Texture`, `Tiny-ImageNet`;<br>
> - [x] [MNIST]()
> > Near-OOD: `NotMNIST`, `FashionMNIST`;<br>
> > Far-OOD: `Texture`, `CIFAR-10`, `TinyImageNet`, `Places365`;<br>
> - [x] [CIFAR-10]()
> > Near-OOD: `CIFAR-100`, `TinyImageNet`;<br>
> > Far-OOD: `MNIST`, `SVHN`, `Texture`, `Places365`;<br>
> - [x] [CIFAR-100]()
> > Near-OOD: `CIFAR-10`, `TinyImageNet`;<br>
> > Far-OOD: `MNIST`, `SVHN`, `Texture`, `Places365`;<br>
> - [x] [ImageNet-200]()
> > Near-OOD: `SSB-hard`, `NINCO`;<br>
> > Far-OOD: `iNaturalist`, `Texture`, `OpenImage-O`;<br>
> > Covariate-Shifted ID: `ImageNet-C`, `ImageNet-R`, `ImageNet-v2`;
> - [x] [ImageNet-1K]()
> > Near-OOD: `SSB-hard`, `NINCO`;<br>
> > Far-OOD: `iNaturalist`, `Texture`, `OpenImage-O`;<br>
> > Covariate-Shifted ID: `ImageNet-C`, `ImageNet-R`, `ImageNet-v2`;
</details>
Note that OpenOOD v1.5 emphasizes and focuses on the last 4 benchmarks for OOD detection.
---
## Supported Backbones (6)
This part lists all the backbones we will support in our codebase, including CNN-based and Transformer-based models. Backbones like ResNet-50 and Transformer have ImageNet-1K/22K pretrained models.
<details open>
<summary><b>CNN-based Backbones (4)</b></summary>
> - [x] [LeNet-5](http://yann.lecun.com/exdb/lenet/)
> - [x] [ResNet-18](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html)
> - [x] [WideResNet-28](https://arxiv.org/abs/1605.07146)
> - [x] [ResNet-50](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html) ([BiT](https://github.com/google-research/big_transfer))
</details>
<details open>
<summary><b>Transformer-based Architectures (2)</b></summary>
> - [x] [ViT](https://github.com/google-research/vision_transformer) ([DeiT](https://github.com/facebookresearch/deit))
> - [x] [Swin Transformer](https://openaccess.thecvf.com/content/ICCV2021/html/Liu_Swin_Transformer_Hierarchical_Vision_Transformer_Using_Shifted_Windows_ICCV_2021_paper.html)
</details>
---
## Supported Methods (50+)
This part lists all the methods we include in this codebase. Up to `v1.5`, we totally support **more than 50 popular methods** for generalized OOD detection.
All the supported methodolgies can be placed in the following four categories.
![density] ![reconstruction] ![classification] ![distance]
We also note our supported methodolgies with the following tags if they have special designs in the corresponding steps, compared to the standard classifier training process.
![preprocess] ![extradata] ![training] ![postprocess]
<!--
density: d0e9ff,
reconstruction: c2e2de,
classification: fdd7e6,
distance: f4d5b3 -->
<details open>
<summary><b>Anomaly Detection (5)</b></summary>
> - [x] [](https://github.com/lukasruff/Deep-SVDD-PyTorch) ![training] ![postprocess]
> - [x] []()
![training] ![postprocess]
> - [x] [](https://github.com/lukasruff/Deep-SVDD-PyTorch)
![training] ![postprocess]
> - [x] [](https://github.com/lukasruff/Deep-SVDD-PyTorch) ![training] ![postprocess]
> - [x] [](https://github.com/lukasruff/Deep-SVDD-PyTorch) ![training] ![postprocess]
</details>
<details open>
<summary><b>Open Set Recognition (3)</b></summary>
> Post-Hoc Methods (1):
> - [x] [](https://github.com/13952522076/Open-Set-Recognition) ![postprocess]
> - [x] [](https://github.com/aimerykong/OpenGAN/tree/main/utils) ![postprocess]
> Training Methods (1):
> - [x] [](https://github.com/iCGY96/ARPL) ![training] ![postprocess]
</details>
<details open>
<summary><b>Out-of-Distribution Detection (22)</b></summary>
<!--
density: d0e9ff,
reconstruction: c2e2de,
classification: fdd7e6,
distance: f4d5b3 -->
> Post-Hoc Methods (13):
> - [x] [](https://openreview.net/forum?id=Hkg4TI9xl)
> - [x] [](https://openreview.net/forum?id=H1VGkIxRZ) ![postprocess]
> - [x] [](https://papers.nips.cc/paper/2018/hash/abdeb6f575ac5c6676b747bca8d09cc2-Abstract.html) ![postprocess]
> - [x] [](https://papers.nips.cc/paper/2018/hash/abdeb6f575ac5c6676b747bca8d09cc2-Abstract.html) ![postprocess]
> - [x] [](https://github.com/VectorInstitute/gram-ood-detection) ![postprocess]
> - [x] [](https://github.com/wetliu/energy_ood) ![postprocess]
> - [x] [](https://arxiv.org/abs/2106.09022) ![postprocess]
> - [x] [](https://github.com/deeplearning-wisc/gradnorm_ood) ![postprocess]
> - [x] [](https://github.com/deeplearning-wisc/react) ![postprocess]
> - [x] [](https://github.com/hendrycks/anomaly-seg) ![postprocess]
> - [x] [](https://github.com/hendrycks/anomaly-seg) ![postprocess]
> - [x] []() ![postprocess]
> - [x] [](https://ooddetection.github.io/) ![postprocess]
> - [x] [](https://github.com/deeplearning-wisc/knn-ood) ![postprocess]
> - [x] [](https://github.com/deeplearning-wisc/dice) ![postprocess]
> - [x] [](https://github.com/KingJamesSong/RankFeat) ![postprocess]
> - [x] [](https://andrijazz.github.io/ash) ![postprocess]
> - [x] [](https://github.com/zjs975584714/SHE) ![postprocess]
> Training Methods (6):
> - [x] [](https://github.com/uoguelph-mlrg/confidence_estimation) ![preprocess] ![training]
> - [x] [](https://github.com/hendrycks/ss-ood) ![preprocess] ![training]
> - [x] [](https://github.com/guyera/Generalized-ODIN-Implementation) ![training] ![postprocess]
> - [x] [](https://github.com/alinlab/CSI) ![preprocess] ![training] ![postprocess]
> - [x] [](https://github.com/inspire-group/SSD) ![training] ![postprocess]
> - [x] [](https://github.com/deeplearning-wisc/large_scale_ood) ![training]
> - [x] [](https://github.com/deeplearning-wisc/vos) ![training] ![postprocess]
> - [x] [](https://github.com/hongxin001/logitnorm_ood) ![training] ![preprocess]
> - [x] [](https://github.com/deeplearning-wisc/cider) ![training] ![postprocess]
> - [x] [](https://github.com/deeplearning-wisc/npos) ![training] ![postprocess]
> Training With Extra Data (3):
> - [x] []() ![extradata] ![training]
> - [x] []() ![extradata] ![training]
> - [x] []() ![extradata] ![training]
> - [x] []() ![extradata] ![training]
</details>
<details open>
<summary><b>Method Uncertainty (4)</b></summary>
> - [x] []() ![training] ![postprocess]
> - [x] []() ![training]
> - [x] []() ![postprocess]
> - [x] []() ![training] ![postprocess]
</details>
<details open>
<summary><b>Data Augmentation (3)</b></summary>
> - [x] []() ![preprocess]
> - [x] []() ![preprocess]
> - [x] [](https://openreview.net/forum?id=Bygh9j09KX) ![preprocess]
> - [x] [](https://openaccess.thecvf.com/content_CVPRW_2020/html/w40/Cubuk_Randaugment_Practical_Automated_Data_Augmentation_With_a_Reduced_Search_Space_CVPRW_2020_paper.html) ![preprocess]
> - [x] [](https://github.com/google-research/augmix) ![preprocess]
> - [x] [](https://github.com/hendrycks/imagenet-r) ![preprocess]
> - [x] [](https://openaccess.thecvf.com/content/CVPR2022/html/Hendrycks_PixMix_Dreamlike_Pictures_Comprehensively_Improve_Safety_Measures_CVPR_2022_paper.html) ![preprocess]
> - [x] [](https://github.com/FrancescoPinto/RegMixup) ![preprocess]
</details>
---
## Contributing
We appreciate all contributions to improve OpenOOD.
We sincerely welcome community users to participate in these projects. Please refer to [CONTRIBUTING.md](https://github.com/Jingkang50/OpenOOD/blob/main/CONTRIBUTING.md) for the contributing guideline.
## Contributors
<a href="https://github.com/jingkang50/openood/graphs/contributors">
<img src="https://contrib.rocks/image?repo=jingkang50/openood" />
</a>
## Citation
If you find our repository useful for your research, please consider citing our paper:
```bibtex
# v1.5 report
@article{zhang2023openood,
title={OpenOOD v1.5: Enhanced Benchmark for Out-of-Distribution Detection},
author={Zhang, Jingyang and Yang, Jingkang and Wang, Pengyun and Wang, Haoqi and Lin, Yueqian and Zhang, Haoran and Sun, Yiyou and Du, Xuefeng and Zhou, Kaiyang and Zhang, Wayne and Li, Yixuan and Liu, Ziwei and Chen, Yiran and Li, Hai},
journal={arXiv preprint arXiv:2306.09301},
year={2023}
}
# v1.0 report
@article{yang2022openood,
author = {Yang, Jingkang and Wang, Pengyun and Zou, Dejian and Zhou, Zitang and Ding, Kunyuan and Peng, Wenxuan and Wang, Haoqi and Chen, Guangyao and Li, Bo and Sun, Yiyou and Du, Xuefeng and Zhou, Kaiyang and Zhang, Wayne and Hendrycks, Dan and Li, Yixuan and Liu, Ziwei},
title = {OpenOOD: Benchmarking Generalized Out-of-Distribution Detection},
year = {2022}
}
@article{yang2022fsood,
title = {Full-Spectrum Out-of-Distribution Detection},
author = {Yang, Jingkang and Zhou, Kaiyang and Liu, Ziwei},
journal={arXiv preprint arXiv:2204.05306},
year = {2022}
}
@article{yang2021oodsurvey,
title={Generalized Out-of-Distribution Detection: A Survey},
author={Yang, Jingkang and Zhou, Kaiyang and Li, Yixuan and Liu, Ziwei},
journal={arXiv preprint arXiv:2110.11334},
year={2021}
}
```
[density]: https://img.shields.io/badge/Density-d0e9ff?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABmJLR0QA/wD/AP+gvaeTAAACuElEQVRoge2Zu2tUQRSHv9UomO0SFKNBrJQkFnY2Ij6iJig+EMRKbIUk+B9YWxn/gdSCIIIkgmiChY3BwkZN8IVoIgQECw0KSSxmxj1z9e7eOTObXeR+cGEm95wz58e8zt1AScm68AR4a58DLc4liI5Mfxew27a3rHMuUWxodQKpKIW0G6WQdqMU0m5k75GUdAJHgb3AGjAHTAPLTRzzD+/toGvAYWWMCjAGLIlY7lkCRq1NU4kVUgEm+FtA9pmgyWJihdzCT/gbMAU8sG357maCfHOJETKEn+hjoEe877F/kzYnIvPNRSukM+P7DKj+w64KzAq7dzSpONUKGRV+P4GBOrb7rI2zv6rKtAEaIZuAD8JvvIDPOP6sJL8GNEIuCJ9fmG+aRvTiz8rZ4EwzpLjZL4v2HeBjAZ9PwF3Rv5IgD4/QGenGzILzOR4w1kn8fdUVlGmG2BkZxuwRgAXM8VqUR8AX296MOb7VxAo5Jdr3gdUA3xVgMidWMDFCNuIvpck8wzpInyESnl4he+SQsP2BuRRDqWKqYRfnoCIGEDcjw6I9gxETynfMb2kO9fKKESLrpKmIOHJ5Jau9ii6tbsxmdbZ7IsbsE3FWgK2aINoZGRS+n4F5ZRyAV5gL0uVzRBMkRojjoTKGZFq0j2kCpBAScgnmIWMM5loFUGSPDAibVfyPJy07bCwXtz80gGZGzov2LLCoiJFlAXgu+udCA2iEyJL7nsI/DxkruqxvtLR24i+BvtgBBf34S7Y3xDl0Ri5R+xlnHnN0puIltWO8AlwMcQ4R0gGMiP7tkIEKImOOYApTFfWW1jXxbhnYrh2kDtswNZsbZ6yoY72y+TS10qMLuC7evQbOhOVYmDlgv23fwFTVX21/EfPd0xA5I+34zOQl/t/+W+Ep8KYViRTkRasTKCkpKUnDb6XM8jMAxEX4AAAAAElFTkSuQmCC
[reconstruction]: https://img.shields.io/badge/Reconstruction-c2e2de?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABmJLR0QA/wD/AP+gvaeTAAADj0lEQVRoge3aS2tdVRQH8F80bS5Y2yYx1qEFrYpSMxBHKjrRFEV8oFB1Wuqotv0A+h3EfBQpitLGRGhsrQ9Ek0adVIUKWvARoxWvg7U35yTcpLnnnnvuLfQPhwX7+V/7sfbaax9uYLgw0kAf83gYf+IyvsfXWMBHuNIAh1owj/Ym3794D69hbFAEu8EOjOM+PIc3cRprCqV+xEm06uhwq9Hb+NWBvTiC86V2v8WhXhtuWpEyZvBZqf1ZNc1OGVM4q7+KwM14A6upj3PYV1fjd2IlNfyd/iqScRDLqZ8V7O+1wduwlBr8RIxOE4oQxmFBMYB3VG2oJaY2T/GtKb0pReAWxZI+p+KeeUcxtVOl9CYVgUnFMpvttvLT+E/Y+ekNeU0rQuyZbABmtltpp2Jzn+iQPwhF4LhihWxriZ1IFb7CaIf8QSkyis9T38evVXgHfkqFNztd54WzNwgcEtx+cA3f7HAq+EUDpKpgBJ8Kjq9sVfCDVOhIA6Sq4qjgeGqzArvxD66Kw2hYMY6/Bc89nQq8KDQ90yCpqjgjuD6bE24qZT6U5OkmGVVEHuxHckJZkYNJftkYnerIxujenFBW5O4kv2mMTnVcTPJATigrsjfJXxqjUx2Xk5zolJnvz9dDEGBMcF3LCeUZyaGhQbgfPaOsyB9J7hoEkS6xO8nfckJZkbw3arsj9xG3J/lrTigrspLkAcOPe5LM1mudItnsPtgYnerIZ95STigrspDk443RqY4nkux4nRgXsdg1xZkyjJhQOI1506+bkSv4UNjolxql1h1eFtfx95Ws1ka8Ks6R8w2R6hYjuCA4Ht6q4Ji4RrbxVP95dY1nBLdLtuGBnEyFL4hY7LBgVHi9bRzbToWWCO23RUB5WJAHeFkX/mCOVqwqbPYgMY2/BKcnu608qxiByXp5dYUpRcDw7SoNlIPYZ0VAuWnswmLisKiHK8aU9c8KU1sXrxUT+FjxHNezM7tfMbXLmvHFpkt9XhSPTbVgn2KZrYrYa6e4cK8YFdYpb+xFhcteG1oKA9AWAeWeX18TRsRTRj4n8sbu67V7RjHtbRGLPapadHICryvcjryUujaxVdESyyu7M23hkc7hLTyP+4XZ3pm+STyAF1KZuVQn178kTuyBBD/GRFT8lLgCbPeNPn9X8a5wAHtSoM6favbgMTwqftW4S5jr/Ij6O34WpnRJvLPM2cIVv4HrGf8Dfs0JOaMPQmgAAAAASUVORK5CYII=
[classification]: https://img.shields.io/badge/Classification-fdd7e6?style=for-the-badge&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABmJLR0QA/wD/AP+gvaeTAAAD3klEQVRogcWZu09UQRTGfwILhTw0cQFRG0tQEwsrG4ydiQExFsQEY6h4KJLYU6M22qGxImpjYvwHLBQBDSq6KqBg1MSOGOWlaHAtztnMZbl79965j/2SCRvuzPnm3Jlz5ptzwaAKuABMAivaJoB+oJLoECvPHmAayBZoL4GmsCRx81Q5jL8D2oAabe3AjD57Qbg3FjvPRTXwHqhzeV7nIOmzIUiK55kObvPoc0r7TNgQJMWzrINrPPrUap8lG4IkeMp0YDFsc/S3xZ8APIFRhuxZgOMe/XLPMrZEyP73y/PWhqAfWZUZ3INwBzCnfbpsCBTdAXh6bQgqkfydBWaRgKvV1uEwvg402hAoUsCYD54p7WuFJowzbm1d/2aAelsSoAF44sEzBewOYR+QlekDxpEMs4qkwS5kJTJE48w+tbOhHMvAU2Q7Wa9EEKSBN5h9bvvmLquNBxHNywppzMrMYufMax3fHuG8rFDPZmeCiLzDOm4R0V4lRwMi/HLayZnNUkAncBdYQOJgDZhHzocscCPJyRZDA+KE05l24AuFs1KuLQAnk59yYTRinFnETDSDqN1mYLu2FmAAkzD+AcOEkCVRoxHjxG8kjXppsjKgR/tmgStxT9Av2jFOtAYY14pxxkvaJ4IUJiZ6LMb36thPRFsXCIxOTEzYSPxyTCo/E2YiYe4XYDLPLSR4g2IDuK2/T4ecSygsIG+zOYSNFkxKdkMKuQKMIUklljLVik6iOoSNGrWx6vIsjSjiuMtUrFH8Hl4MOUd+5f2/AnhF/GUqIJqtdQBzQE4DI8B5YIhkylQA3FNDAyFsDOItZ+IuUwGSabKI7LBNvzkheQ44ClwC7mAcibtMBcjenMf+QMwVPj6z9Xa4RHFH6ojIkQrgPnYS5RimFuCmhCcpfgHr0D7jAXi3IA08wgSqUzSWe4wrR1Yi58RwgX5BylRW5SOAI8BXNfIN2dtDGIcyyF5vQc6YaiQ7DWJiIgtcpbCM91umsi4fdWNU62M23w5PKGmxi5Xf4kOxMpVr+cjtS9IkssRV2kYcRq7j/iZSyNsbRc6AZbX1Abn+9gF/ka2134cz+WUqz/KRny9JueBbBc76mIAXbqqt0ZB2NsHvl6QskmoPRcC5F5EkG8DBCOwBwb4kWWcHF1xTmw+jMhjkS9JYVKTALuAnJmhXCSnRg3xJ+h7UuAfSSAKITKInKgUUFcBztfkRORdqCSnRE5MCDvSovTnkhM6HlURPRArkYVztdXj0CSzRY5cCLvihNnd69LGS6FZSIARijctAUiAkShGXsaAUcRkLShGXsSHpuIwVkcXlf7gl3GNHJu+DAAAAAElFTkSuQmCC
[distance]: https://img.shields.io/badge/Distance-f4d5b3?style=for-the-badge&&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABmJLR0QA/wD/AP+gvaeTAAAB3ElEQVRoge2aO07DQBRFDzT5LABFNLRIFIiSgoYC0SGxAgoaPoVXQ4OggR2AxH8XSBErgB4huoTCfnjk2HHi+fiNlNMkhT2ak+vnGzmBBc7ZAo6ApbY3YsMO8A2MgUtgud3tNGMX+CGVGGWvF0SWjJnENbAP/BJZMkUJ2fQeEcmYl9MNk5s1k1F7mZlJjIEPYLXkONXJmEncAsPs/RAYlByvMhlT4or0Ex6Qy5xXnKdKZtpMDICzmvNVyMw6E3W0OjPzzkQdrSRT1hMrwDsRJVM3E7bJHJBfqodWO52CKVG1WRuZLvCQnfsJrFnut5R5ZmKWW2+RLvBILrFuv+VJmvTELLdeIbhEk56oI4iEq56oogPcZWt/ARsO1/7HdU8UCZ6Ey54QOsA9AZPw0RNd4AkFM2GTTPAkfPREjwBJ+O6J4BI+eqIHPKNgJmxQNxNNCJ6Ej57oAS9E3hNqZsImmeBJ+OoJkYi6J7xLbDIpIbjqCe8SkD6ZkJk4drx2MAkhIf2hZQScOFozyGCX4VKmNQnBlDltuEbrEoKNjBoJoYlMH2USwjwyfeAVhRKCKVPVJ+olhGky0UgIZTLRSQgJ+T8UEuCNgI3tGklGvtJElUQRkYlaQtjG7UOIBQB/hf9HJ+Iv7O8AAAAASUVORK5CYII=
[preprocess]: https://img.shields.io/badge/PreProcess-f4d5b3?style=social&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeTAAABDUlEQVQ4jc3TPy8EURQF8N8uS/wJGxuh0tH7CBKthk/gk6iIQiFRSEhEFEQhGoSQbERUEo2SGp1CwTa7infJZE3sbuckr5j75p5z7pk7/BesoIZGm6eG5SxBDSMdCFbwmS002mi6xU1zT7ED1fpfQtmLAexhtAVhI++hGyd4wD3KUS/jUJr9G8P8HmETBUzjGqdSuMeYwno4PMMjZrMOlnCHwagVsI23UC9iHNWoz+AlS/CEsSZHXTjABvpwiZ0YdSsc/hBMykcJEziXwi0FSTXGQVqkSl43ekNpHz1BcoV+YQXW8BwvZLGKVymPRexKoc7hQ1y0whHepXzqWJBZ41abWJA+3xAuMK/pH/gCPJhBnIabIDQAAAAASUVORK5CYII=
[training]: https://img.shields.io/badge/Training-f4d5b3?style=social&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAABmJLR0QA/wD/AP+gvaeTAAAH5UlEQVR4nO2dW2wVRRjHf6WVwgNVlKCERJDihUiQxhvGFjCgFEXig8QgiSHGywOCD14wmpgYiakEEyMGNUEgImBQ4+VBJdqICMGgKAhYvIBGgaIhEAVLKbTHhzkbjqczszOzs2d3YX/JvJye/c+337dnZvabSyEnJycnJycnJycd1AILgQNAwbIcKF7bt+JWn0EsxN7x5aWl4lafQbQTPQDtFbfakaqkDZBQ8KSTxnvrRZ+kDTjbyQOQMDUxas8EpgCHgSXALxH1VE1K1CarHngIGAh8Cqz2oJkoNZy+iaD8CzQbXFuPumNVofr+KIP6JgHHyq5bTbwPZqzUAG8hd8hx5EEYDMwFNiuucw1AAfgWeAQYKrluEtChuG4NUG10xylC53xZEIYjmqbOkGsKwClNvacMru8CliJ+YaB3fukvITNBMHF+aRDeRjjFdFz/nabu7RY6J4F3CHd+poJg43yX0onozFU0AydirH8VKQ5CnM5vAxYDYwzsGFP87q6YbHmTFAbBxPnfY9bGB2ULMAcYEsGuC4EHgC+BHsN6O4u2ZiYIJs7fDNQBU9EHoRtYC4yNwc7LgWXo+5vOoo11hI/EVpKCIFTRe5yvcn6AKgitwJUVsHk48K6k/sD5ASZBWFEBe7XMxM75ATcCWxHNwjbgrkoYW0YzsBE4AmwCxkm+YxKEGZUwVsUKhVFBmy9zftaoQ98nLPNVkUsy7rDmb5cBTY62pIkmxL2oOFQpQ2SMROR2dJ3atMSsi8409IOGo8AliVlXZAr6t8kTwITErHOnEf1LXQcijZEKJqP/JWxLzjRnviIjzg+4id7p3NLxfSamBUv4hww5P2A5cqN3JGmUIxuR38vaJI3SMQR1XzDLg35fYCLwHKJ5+IvTv64/i5+9BIzHz/RqM/J76UK8zKWORcgNXk+05qc/8DB2i7N+B54E+kWoF+A9hf7LEXW9MxB5+98DXBVB91bcVsUFZQ9we4T66xHzBrJ+4IIIut65H7kD3nfUqwLmI5oXV+eXlhbcf4WqXNeDjnqx0IrcSFl+JYwqxOu9D8eXltdx6xsaFHqfO2jFwhDkc7G7HPWelWj5Ks842iSb6uxGPsEfCy6rlB93qGe6ge5HwGxECmBA0bZRxc/WoZ946cZsSUw5TxjYFZRYVmW7rFIeaVlHP0SnqdJrA6410GkEdmp09mE/Ohql0VMVr6uybUcifzjU8ZRG7zPE025KHep+qQDMs7StCjio0ZOV/ZZ1aLGN/nJL/T6IsbtMawdu8woDgN0Kzf3AOZZ6qxRauuINm0pPAtdb6k9QaPUgZs9cGaexc7ylVhNmC78SC8ApxNM63UF/sULzk6iGFzVk2gsctO5EjO5M30+8EXcFqtTvPR6071Vob/KgHZD5AASJtfJyhQftkQrtgx60AzIfAFXb2t+D9gCFdpcH7QAv/klyh4zKWB9r81WrqVO3IyhJg44oPh/sQVuVu090NYOMJAOwR/G5j2UtqiGxqs7EcAlA+TB0O265FlVm0ceKubsVn29x0LoDMQwt77Nix+bl4wRmy8hLUb2IFYDrItjdhDo5Z/uCdw3yyZlE3wNUxXbKri/qfNNu7PJAAecCPyg0f8V+guZVhVZFAmCbjGtzqGOeRq8Vu3xQWDJuvoN9ezV6suI1Gfe8ZeUF7DdW9EMYrdLbiUg1h3ED8KNG52fEHIINIzR6quI1HV2LCILOQeVljkM9zejzLD2ISZfZiBx9LeJpvxSRclgXYlMPYqLflsdCdMuf/BYqeExOPfKOzmWUAWLa0PZpMy1PO9okW57eQ4rWB6k2MTQ4aPVBTKD7dv5Spzt
Tp7TXO+rFgqoDjbKMbz7mG+vCmp0W3PdzfajQvc9RLxYGIzZdlxvZDYyOoHsbotN0df4+4JYI9TcgfwiOAedF0I2FJcid0BpRtxbxa/hNoa9y/FyiLU2sAjYo9BdF0I2N4ai3gPpIKVQhhqEvIiZT2jk9YmovfrYAMdVoO98rYxbye+kALvKgHwtrkRu9PUmjHGlDfi/eNuX5ZgryfqCASF5laYNGDeoBwDFSuOXqZvT7xLYmZ5ozupNXUhWEyeid30m0pSVJ0Yh+h2QqgjAC9b6wwPlTlVenn7CzLf4GhiVmHfCCxKgzxfkBYUFY6Ksilxkx3S6Rn/C79iYpNiHuRcWgShkiw/WwjkbE3uECYopvFpUfJc1AHH/WDXxDRg/rqCJ80arpcTVfEM85QeWMRj5Z08H/0xaZOK4GxJh5DWZBCGtPe4APcNvaFEYD4pA+3XxDcJJjZg5sCqgm/OAm2yPLdgCPIkZargxF5IS+tqj3OOFHlqXK+QEmQXAte4HXgKsN7BiLWBSg2hsQtaTS+QHVuG1kMC0n0S9/vxW7M0jPKOcH2AShA3ECoW3TpCKs6SgtXYjkoSp/lUnnB1QjHBvm/ODUkaGINLPuyJugRD26uBMxdxHM54blsTLn/IBqhOFhzi/lfMQO9A3oRysqdE7cjOiQZQt+J6EOfiadH1BN74P9jmJ23s7F+AtAveaagIn0PiNoBRV0fpxvojMQ7wCHgFcQSwNNUDnb9h84mN7bMMR6pkHAx4iDxStGGidMdE+7DWm8t16kbsfI2UYegIRJYwB8/BM2l2MTEiGNAVjpQeMNDxpnLS6rsoOyH3HAX/7PPHNycnJycnJycnJycnJycnJyUsd/Xk5Gaglg9FgAAAAASUVORK5CYII=
[extradata]: https://img.shields.io/badge/ExtraData-f4d5b3?style=social&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAABmJLR0QA/wD/AP+gvaeTAAAGYElEQVR4nO2d228VRRzHP6fl2ipFvECN1gpoQR6MVEXFN28hkQdS0FdFRf0P8E8QjXh5ML6aiAFvKD6p9U2lGkuJxEtiI1ClRWy5lYrc6sNvT/acPbN7dk9nd6dnf59ksifl7G9+85uzM7PznWFKuEcL0A3cCtwE3OJdrweu8dISoN37/kJggff5PPCv9/kcMAGc9K7/AH8CR4AR4A/gMDCdamkSUso5/4XAWuBe77raS20Z5T8F/AL8DBwAvgcG8Ss1c7KukPnAeuAR4GHgTmBuxj7U4yIwBPQDXwDfABdy9cgy7cCTwIfAJNJEzKY06fn+BBk8uWk+IeuBF4FNJC/IaeA3/Pb+CDAGjCP9wQRwxvtuZb9R2Z904Pc31wLLkP7oZu+6CliU0K8p4GPgbeDbhPfmwhzgKeAg8X+Bx4BPgO3AY0BXhv52eXlu93w4lsDvg0hZ52Tob2xagWeAYeoXZBzY7X2/Ow9n69ANPAvsQXytV55hYCsSAyfoBQaoXwnvAhtxrxOPogV4EHgDGCW6jEPAA/m4KSwA3gKuYHbwCvAV0Iejj3VC5gKbkdFXWJkvA28io8lMuQ0Zr5ucugS8B6zJ2qkMWYOU8RLmGAwCK7Nyphc4EeLIZ8DtWTniAD3APsyx+Bt52U2V+4BThsxHgMfTztxhNiIxCMblFBKzVLgR+MuQ6afIOL/odCAjs2B8xpB3H6u0IvM8wcxeI//5MJcoATupjdMAlofFLxgyedlmBk3GDmrj9bwt421IBxVsplpsZdCEtCADnMqYHUemdmbMloDhM0CnDcNNzlJkTq4ydn02DO8OGN1hw2hBeIXq2L1vw+ihgNF7bBgtCOuojt1PNowGH7urbRgtCIuobe4jidMxBzUDHebGJxjfuj/mRkZKPQ3cU1QSx6qRCtnSwD1FZXMaRoMvOGeRaRQlmmXU9r/TNgybZjL3oS+GUbQQPgs8Y8IUMn0fCcc0dZJ6hUwjiqE+KT4l4FWiYzZjooxPI4vJltrIaJZzHeHNVKoVYuqoRhCRpqiECVSZdOprEdElrLNfZSPTWcJq4HPMsRgF7jL8fcaYDK4gfJHDZWAXzb/IYRdSVlMMfgSWe9/NpEJAlrq8TvQyoH7kRXI2rcUKYx6yvvdrosu8k+plQJlVSJm7gf0hDpbTBLNzoVwr/kK540SXcRDzQrnMK6Ts+FbiLSWdAD4AnsN/rF1iObANWfF+kvrlGQaeJnz4n0uFlCkvth4y3BeWRoG9wEvABmRtbRYzyiUvrw1e3nsJH6yY0hDxFlsnqpA4BQ8aiRus+5HFEX3428/iMom/HeGodx3F36I2jqx5AtlMc8773I609eBvRVjife5EKqC8HaEHuKoBvz4C3gG+i3lPo/GLNDiTR64NmfXcg0xMxv0FupLOIjJ2H40tUnDmCTExD3lyHgUeQsbp8yLvyJ4LSAfdD3yJbMy5OAN7ieKXdYUEWYBUyjrvegfyYpm0KWmUSeBX/E2fA0hl/Gcxj1lVISZKyM6mbu/ahWyLvoHqbdFlOXQ+/pa5KfxgnqV6W/QJZFv0US8dRvqmtEm9D0m6L6/ILCZhH6Kaerqopu4Yqqk7hGrqDqGaumOopu4Iqqk7hGrqDqGauiOopu4Iqqk7gGrqDqCaugOopo5q6lVO1SNoRDV11dRzTaqp54xq6qimHu8fGzFoAdXUYxisTKqpx0c1dcdQTd0xVFN3CNXUHUI1dcdQTd0RVFN3CNXUHUI1dUdQTd0RVFN3ANXUHUA1dQdQTR3V1KucqkfQiGrqqqnnmlRTzxnnNfXTVKuEHcT4L7Nj0uyaegd+0woSy
8VRN8Q5zm6E6pe8HuCHxK6ZOY+0xZXtcTNp6sFZi5F6N8SpkENUV8gW7FWIiWkkUFksQEiboGJo5XSEzdR2cqoY1sekGG6yYXghtW+pqhhGY1IMrR15BHKgVXD4pophOCbFcJvNDFoxH0C8Ez1PpJISMuEajNN+UjhNuhPzwZKqGAphiuEYMkpMhaijV1UxrI1LqkevlllL7bmGlZ29KoaSMjmcuMxKRBUzOaKKocRmRdZOzUcOcw9zqoiKYW4H3FfSi3kEVpmKoBgewKwY5kKRFcPfEcXQ+rDWBqoYWnAqLVQxdJSiK4aJyHrao4iKYSLynodqdsUwMXlXiIlmUgwVRbHK/zvve/uixi6eAAAAAElFTkSuQmCC
[postprocess]: https://img.shields.io/badge/PostProcess-f4d5b3?style=social&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAABmJLR0QA/wD/AP+gvaeTAAAFXElEQVR4nO2dW2gdRRjHf7bRIiKiVNSK2IqkCV4ajdfUqEWrSOKt1KpVUi+NN0SliD5oVfClCj4Vi/oo6oNI38UHxUbEB29QwUtVvEWbqmlMTc31+DCNrbDz7ew5szu7O98Pljxk58+3//85s7O7s3NAURRFURRFURRFURRFUVpnAbAGeAP4FpgAGgG3F4HDcj3iEnEh8BlhDY82hJuA/YQ3O8oQVgL/EN7ktO0VTBdZK44AdhHe3GhDuJ/wpkbdHb1H8kH+AQwAxxZYy3JLLbUNoQ2YJvkA+wPUkyWAWoRwMskHtpcwB5Y1gMqfE2wH/GXJ6ildCJVNvEmGgc+F/w8CWynwWxtbAOPAKuBjYZ8HgJcpyJvYAgAYBVYjhzAIvEQB/sQYAJQohFgDgJKEEHMAUIIQYg8AAoegARiChdDmU6wCzF+gNcvggb/3AXOtl6PfgGYYBF7wJaYBNMc1voQ0gMBoAIHRAAJT1wC+wtzRbHXryLvQugZQGTSAwGgAgdEAAqMBBCa2e0FZmR9N5YZ+AwKjAQRGAwiMBhAYDSAwGoAfjgc2Ax8Bu4ExYCdmlt1ZRRRQtrmhRXInxnDbfNNZYBvmBZbciDWAZ3Cf+PsucFRehcQYwNNkn309BBydRzGxBfAozU1/zy2EmAJ4jObNP7Q78npOiCWATcjG7gJWAIuB7Sn7bvNZWAwBPISZjGUz9Adg6SH7LwReF/afBbp8FVf3ADYim/8jsCyhXVoIW30VWOcABkk3/zShfRv27minryLrGsBdmK7CZv7PwOkOOost7cd8FVrHAAaQzR/GHLcLZ1g09vgqtm4BrAdmsJv/G9DpqHUi8IVF50NfBdcpgHXI5o8AZzpqLcF4YNN6ylfRdQlgDTCF3bBRoNtR6xTga0FrHDjBV+F1COB6ZPP/BM5x1DoVs0ybTavBwZc9vFD1APqBSeRP/nmOWsuA7wWtBvCcx9qBagdwFfISa2OYNfBcWAp8J2g1gOc91v4fVQ1gNbL5+4BLHbXagZ8ErQawxWPt/6OKAfRiDLaZ9TdwuaPWcuAXQStX8+cLqFIAKzGjEMn8VY5aHZiLMsn8zR5rT6RKAfQAf2E3awK4wlGrE/hV0GoAT3is3UpVArgI2fxJoM9RqwtzK8GmNQc84rF2kSoEcC5mLC+Zf20Grd8FrTnM84PCKHsAXZjVG22GTQHXOWp1p2jNAQ96rN2JMgewAvnTOgPc7KjVgzwHaA6z4lbhlDWAs5H76RngVketS5DPH7PAHR5rz8QSS1FjhFuPswN5hDID3Oao1Yts/gywwWPtmSnbwq2dmHv2kmG3O2pdibkusGlNA7d4rL1ppKWLNwDHFVRHO/KFUZau4mrkH56YAtZ6rL0l7sVeqI/NhWMwz2mlk+Tdjlp9yEvxTwI3OmoVwuHAN4QNoE9on2WE0od8k24SuMFRq1B6yO8HHFzYYmmbZWzen3IMk7hfMwRhLfn8hIkLQ5a2zzq2X4f8VGwC8/yg9JwPfEKxARyJ/ZPrMoNhPfaRXAMzEnK9SVcKFmD6ydcw54ZWf8Yqjcss7UZIvxYZQJ4JMX5AXxF4kmTztqe0S5v9tg/3ZwNR8zbJBm4S2mxENn8vcHF+JdeHhdhvkl2QsH8HZlKUNOl21NJWSaAbe/exCDM38x7gVcxs5rTzjZqfkYdJNnI/8k20pG0PHl+ciIW38DPUHcHcwlYykjYrwWXbTUFvtdeNdlo3f5gClqhMog4rZvU20WYas1T9ELADeB8z5CycWAKYAD7FGP4BxnBvrwfFTtI08DHgHeBxzLPcRcGqqzkncbAPfxMzHO2mQ
svw/Atv0E+fkVDOBwAAAABJRU5ErkJggg==
| 33,949 | 90.509434 | 2,893 | md |
null | OpenOOD-main/environment.yml | name: openood
channels:
- pytorch
- conda-forge
dependencies:
- python=3.8
- pytorch=1.8.1
- torchvision=0.9.1
- cudatoolkit=10.1
- scikit-learn=0.24.2
- json5=0.9.6
- matplotlib=3.3.0
- scipy=1.5
- tqdm=4.62.3
- pip
- pip:
- pyyaml==5.4.1
- pre-commit
- opencv-python==4.4.0.46
- imgaug==0.4.0
- pandas==1.4.1
- diffdist==0.1
- torchlars==0.1.2
- mmcv==1.4.2
- mmcls==0.17.0
- Cython==0.29.30
- faiss-gpu==1.7.2
- gdown==4.5.1
| 502 | 16.344828 | 29 | yml |
null | OpenOOD-main/imglist_generator.py | import os
'''
path="./data/images_classic/cinic/valid"
save_path="./data/benchmark_imglist/cifar10/val_cinic10.txt"
prefix="cinic/valid/"
category=["airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"]
with open(save_path,'a') as f:
for name in category:
label=category.index(name)
sub_path=path+'/'+name
files=os.listdir(sub_path)
for file in files:
line=prefix+name+'/'+file+' '+str(label)+'\n'
f.write(line)
f.close()
'''
# Build the benchmark image list for CIFAR-100-C.
# Each output line is "<prefix><filename> <label>", where the label is the
# second underscore-delimited token of the filename with its extension
# stripped (assumes names like "something_<label>.<ext>" -- TODO confirm
# against the actual cifar100c filenames).
path = "./data/images_classic/cifar100c"
save_path = "./data/benchmark_imglist/cifar100/test_cifar100c.txt"
prefix = "cifar100c/"

files = os.listdir(path)
# NOTE(review): mode 'a' appends, so re-running this script duplicates
# entries in the list file; remove the file first if a fresh list is wanted.
with open(save_path, 'a') as f:
    for file in files:
        splits = file.split("_")
        label = (splits[1].split("."))[0]
        line = prefix + file + " " + label + '\n'
        f.write(line)
# Removed the redundant f.close(): the `with` block already closes the file.
'''
path="./data/images_largescale/imagenet_v2"
save_path="./data/benchmark_imglist/imagenet/test_imagenetv2.txt"
prefix="imagenet_v2/"
with open(save_path,'a') as f:
for i in range(0,1000):
label=str(i)
sub_path=path+'/'+label
files=os.listdir(sub_path)
for file in files:
line=prefix+label+'/'+file+' '+label+'\n'
f.write(line)
f.close()
''' | 1,283 | 28.860465 | 90 | py |
null | OpenOOD-main/main.py | from openood.pipelines import get_pipeline
from openood.utils import launch, setup_config
def main(config):
    """Program entrance: build the pipeline described by ``config`` and run it.

    The config is everything you need to provide; it is given as one or
    more YAML files and individual keys can be overridden on the command
    line.

    Example:
        python main.py \
        --config configs/datasets/mnist_datasets.yml \
        configs/train/mnist_baseline.yml \
        --dataset.image_size 32 \
        --network res18

    Note:
        A config file is the minimum requirement; "--config_key new_value"
        pairs are only needed for values you want to modify.
    """
    # Construct the pipeline selected by the config and execute it.
    get_pipeline(config).run()
if __name__ == '__main__':
    # Assemble the full config from YAML file(s) plus command-line overrides.
    cfg = setup_config()

    # Single-process path kept for reference:
    #   setup_logger(cfg)
    #   pipeline = get_pipeline(cfg)
    #   pipeline.run()

    # Spawn `main` across the requested GPUs/machines via the launcher.
    launch(
        main,
        cfg.num_gpus,
        num_machines=cfg.num_machines,
        machine_rank=cfg.machine_rank,
        dist_url='auto',
        args=(cfg, ),
    )
| 1,114 | 24.340909 | 79 | py |
null | OpenOOD-main/setup.py | import setuptools
# The long description shown on PyPI comes straight from the README.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

# Runtime requirements, hoisted out of the setup() call for readability.
install_requires = [
    'torch>=1.13.1', 'torchvision>=0.13', 'scikit-learn', 'json5',
    'matplotlib', 'scipy', 'tqdm', 'pyyaml>=5.4.1', 'pre-commit',
    'opencv-python>=4.4.0.46', 'imgaug>=0.4.0', 'pandas', 'diffdist>=0.1',
    'Cython>=0.29.30', 'faiss-gpu>=1.7.2', 'gdown>=4.7.1', 'libmr>=0.1.9'
]

# Trove classifiers describing the package.
classifiers = [
    'Programming Language :: Python :: 3',
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: OS Independent',
]

setuptools.setup(
    name='openood',
    version='1.5',
    author='openood dev team',
    author_email='jingkang001@e.ntu.edu.sg',
    description='This package provides a unified test platform for '
    'Out-of-Distribution detection.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/Jingkang50/OpenOOD',
    packages=setuptools.find_packages(),
    install_requires=install_requires,
    classifiers=classifiers,
    include_package_data=True,
)
| 1,065 | 34.533333 | 87 | py |
null | OpenOOD-main/configs/datasets/aircraft/aircraft.yml | dataset:
name: aircraft
num_classes: 50
pre_size: 512
image_size: 448
interpolation: bilinear
normalization_type: aircraft
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/aircraft/train_id.txt
batch_size: 32
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/aircraft/val_id.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/aircraft/test_id.txt
batch_size: 200
shuffle: False
| 815 | 23 | 63 | yml |
null | OpenOOD-main/configs/datasets/aircraft/aircraft_oe.yml | name: aircraft_oe
dataset:
name: aircraft_oe
split_names: [train, oe, val, test]
oe:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/aircraft/train_oe.txt
batch_size: 32
shuffle: True
interpolation: bilinear
| 295 | 21.769231 | 63 | yml |
null | OpenOOD-main/configs/datasets/aircraft/aircraft_ood.yml | ood_dataset:
name: aircraft_ood
num_classes: 50
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 64
shuffle: False
pre_size: 512
image_size: 448
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, nearood, farood]
val:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/aircraft/val_ood.txt
nearood:
    datasets: [hard]
hard:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/aircraft/test_ood_hard.txt
farood:
datasets: [easyood]
easy:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/aircraft/test_ood_easy.txt
| 735 | 24.37931 | 70 | yml |
null | OpenOOD-main/configs/datasets/cifar10/cifar10.yml | dataset:
name: cifar10
num_classes: 10
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/train_cifar10.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar10.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar10.txt
batch_size: 200
shuffle: False
| 815 | 23 | 67 | yml |
null | OpenOOD-main/configs/datasets/cifar10/cifar10_double_label.yml | dataset:
name: cifar10_double_label
interpolation: bilinear
normalization_type: cifar10
split_names: [train, val, test]
num_classes: 12 # actually it's 10 classes but it has 2 groups
image_size: 32
pre_size: 32
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/train_cifar10_mos.txt
batch_size: 128
shuffle: True
interpolation: bilinear
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar10_mos.txt
batch_size: 128
shuffle: False
interpolation: bilinear
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar10_mos.txt
batch_size: 128
shuffle: False
interpolation: bilinear
| 967 | 28.333333 | 71 | yml |
null | OpenOOD-main/configs/datasets/cifar10/cifar10_extra.yml | dataset:
name: cifar10
num_classes: 10
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistExtraDataDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/train_cifar10.txt
batch_size: 128
shuffle: True
extra_data_pth: ./data/images_classic/cifar10_extra/stylegan_images.npy
extra_label_pth: ./data/images_classic/cifar10_extra/stylegan_labels.npy
extra_percent: 100
orig_ratio: 0.8
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar10.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar10.txt
batch_size: 200
shuffle: False
| 1,020 | 25.868421 | 76 | yml |
null | OpenOOD-main/configs/datasets/cifar10/cifar10_fsood.yml | ood_dataset:
name: cifar10_fsood
num_classes: 10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, nearood, farood, csid]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/val_cifar100.txt
nearood:
datasets: [cifar100, tin]
cifar100:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar100.txt
tin:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_tin.txt
farood:
datasets: [mnist, svhn, texture, place365]
mnist:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_mnist.txt
svhn:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_svhn.txt
texture:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_texture.txt
place365:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_places365.txt
csid:
datasets: [cinic10]
cinic10:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/val_cinic10.txt
| 1,333 | 29.318182 | 70 | yml |
null | OpenOOD-main/configs/datasets/cifar10/cifar10_oe.yml | name: cifar10_oe
dataset:
name: cifar10_oe
split_names: [train, oe, val, test]
oe:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/train_tin597.txt
batch_size: 256
shuffle: True
interpolation: bilinear
| 294 | 21.692308 | 66 | yml |
null | OpenOOD-main/configs/datasets/cifar10/cifar10_ood.yml | ood_dataset:
name: cifar10_ood
num_classes: 10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, nearood, farood]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/val_tin.txt
nearood:
datasets: [cifar100, tin]
cifar100:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_cifar100.txt
tin:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_tin.txt
farood:
datasets: [mnist, svhn, texture, place365]
mnist:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_mnist.txt
svhn:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_svhn.txt
texture:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_texture.txt
place365:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar10/test_places365.txt
| 1,168 | 28.974359 | 70 | yml |
null | OpenOOD-main/configs/datasets/cifar100/cifar100.yml | dataset:
name: cifar100
num_classes: 100
image_size: 32
pre_size: 32
interpolation: bilinear
normalization_type: cifar100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt
batch_size: 200
shuffle: False
| 824 | 23.264706 | 69 | yml |
null | OpenOOD-main/configs/datasets/cifar100/cifar100_double_label.yml | dataset:
name: cifar100_double_label
interpolation: bilinear
normalization_type: cifar100
split_names: [train, val, test]
num_classes: 120 # actually it's 100 classes but it has 20 groups
image_size: 32
pre_size: 32
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100_mos.txt
batch_size: 128
shuffle: True
interpolation: bilinear
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100_mos.txt
batch_size: 128
shuffle: False
interpolation: bilinear
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100_mos.txt
batch_size: 128
shuffle: False
interpolation: bilinear
| 978 | 28.666667 | 73 | yml |
null | OpenOOD-main/configs/datasets/cifar100/cifar100_extra.yml | dataset:
name: cifar100
num_classes: 100
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistExtraDataDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/train_cifar100.txt
batch_size: 128
shuffle: True
extra_data_pth: ./data/images_classic/cifar100_extra/stylegan_images.npy
extra_label_pth: ./data/images_classic/cifar100_extra/stylegan_labels.npy
extra_percent: 100
orig_ratio: 0.8
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar100.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100.txt
batch_size: 200
shuffle: False
| 1,031 | 26.157895 | 77 | yml |
null | OpenOOD-main/configs/datasets/cifar100/cifar100_fsood.yml | ood_dataset:
  name: cifar100_fsood
num_classes: 100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, nearood, farood, csid]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/val_cifar10.txt
nearood:
datasets: [cifar10, tin]
cifar10:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar10.txt
tin:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_tin.txt
farood:
datasets: [mnist, svhn, texture, places365]
mnist:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_mnist.txt
svhn:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_svhn.txt
texture:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_texture.txt
places365:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_places365.txt
csid:
datasets: [cifar100c]
cifar100c:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar100c.txt
| 1,346 | 29.613636 | 71 | yml |
null | OpenOOD-main/configs/datasets/cifar100/cifar100_oe.yml | name: cifar100_oe
dataset:
name: cifar100_oe
split_names: [train, oe, val, test]
oe:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/train_tin597.txt
batch_size: 256
shuffle: True
interpolation: bilinear
| 297 | 21.923077 | 67 | yml |
null | OpenOOD-main/configs/datasets/cifar100/cifar100_ood.yml | ood_dataset:
name: cifar100_ood
num_classes: 100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, nearood, farood]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/val_tin.txt
nearood:
datasets: [cifar10, tin]
cifar10:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_cifar10.txt
tin:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_tin.txt
farood:
datasets: [mnist, svhn, texture, places365]
mnist:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_mnist.txt
svhn:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_svhn.txt
texture:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_texture.txt
places365:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/cifar100/test_places365.txt
| 1,178 | 29.230769 | 71 | yml |
null | OpenOOD-main/configs/datasets/covid/covid.yml | dataset:
name: covid
split_names: [train, val, test]
num_classes: 2
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
train:
dataset_class: ImglistDataset
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/train_bimcv.txt
batch_size: 128
shuffle: True
interpolation: bilinear
val:
dataset_class: ImglistDataset
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/val_bimcv.txt
batch_size: 200
shuffle: False
interpolation: bilinear
test:
dataset_class: ImglistDataset
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/test_bimcv.txt
batch_size: 200
shuffle: False
interpolation: bilinear
| 774 | 24.833333 | 53 | yml |
null | OpenOOD-main/configs/datasets/covid/covid_fsood.yml | ood_dataset:
name: covid_fsood
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 20
shuffle: False
num_classes: 2
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, csid, nearood, farood]
val:
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/val_ct.txt
csid:
datasets: [actmed, hannover]
actmed:
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/test_actmed.txt
hannover:
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/test_hannover.txt
nearood:
datasets: [ct, xraybone]
ct:
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/test_ct.txt
xraybone:
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/test_xraybone.txt
farood:
datasets: [mnist, cifar10, texture, tin]
mnist:
data_dir: ./data/images/
imglist_pth: ./data/imglist/covid/test_mnist.txt
cifar10:
data_dir: ./data/images/
imglist_pth: ./data/imglist/covid/test_cifar10.txt
texture:
data_dir: ./data/images/
imglist_pth: ./data/imglist/covid/test_texture.txt
tin:
data_dir: ./data/images/
imglist_pth: ./data/imglist/covid/test_tin.txt
| 1,337 | 26.875 | 57 | yml |
null | OpenOOD-main/configs/datasets/covid/covid_ood.yml | ood_dataset:
name: covid_ood
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 128
shuffle: False
num_classes: 2
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, nearood, farood]
val:
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/val_ct.txt
nearood:
datasets: [ct, xraybone]
ct:
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/test_ct.txt
xraybone:
data_dir: ./data/covid_images/
imglist_pth: ./data/imglist/covid/test_xraybone.txt
farood:
datasets: [mnist, cifar10, texture, tin]
mnist:
data_dir: ./data/images/
imglist_pth: ./data/imglist/covid/test_mnist.txt
cifar10:
data_dir: ./data/images/
imglist_pth: ./data/imglist/covid/test_cifar10.txt
texture:
data_dir: ./data/images/
imglist_pth: ./data/imglist/covid/test_texture.txt
tin:
data_dir: ./data/images/
imglist_pth: ./data/imglist/covid/test_tin.txt
| 1,075 | 25.9 | 57 | yml |
null | OpenOOD-main/configs/datasets/imagenet/imagenet.yml | dataset:
name: imagenet
num_classes: 1000
pre_size: 256
image_size: 224
interpolation: bilinear
normalization_type: imagenet
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet.txt
batch_size: 128
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet.txt
batch_size: 128
shuffle: False
| 836 | 23.617647 | 69 | yml |
null | OpenOOD-main/configs/datasets/imagenet/imagenet_double_label.yml | dataset:
name: imagenet_double_label
interpolation: bilinear
normalization_type: imagenet
split_names: [train, val, test]
num_classes: 1008 # actually it's 1000 classes but it has 8 groups
image_size: 224
pre_size: 256
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
train:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/train_imagenet_mos.txt
batch_size: 256
shuffle: True
interpolation: bilinear
val:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/val_imagenet_mos.txt
batch_size: 256
shuffle: False
interpolation: bilinear
test:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_mos.txt
batch_size: 256
shuffle: False
interpolation: bilinear
| 990 | 29.030303 | 73 | yml |
null | OpenOOD-main/configs/datasets/imagenet/imagenet_double_label_fsood.yml | ood_dataset:
name: imagenet_fsood
num_classes: 200
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 256
shuffle: False
pre_size: 256
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, nearood, farood, csid]
val:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/val_openimage_o.txt
nearood:
datasets: [ssb_hard, ninco]
ssb_hard:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt
ninco:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt
farood:
datasets: [inaturalist, textures, openimageo]
textures:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt
inaturalist:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt
openimageo:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt
csid:
datasets: [imagenetv2, imagenetc, imagenetr]
imagenetv2:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_v2_mos.txt
imagenetc:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_c_mos.txt
imagenetr:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_r_mos.txt
| 1,639 | 32.469388 | 77 | yml |
null | OpenOOD-main/configs/datasets/imagenet/imagenet_fsood.yml | ood_dataset:
name: imagenet_ood
num_classes: 1000
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 32
shuffle: False
pre_size: 256
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, nearood, farood, csid]
val:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/val_openimage_o.txt
nearood:
datasets: [ssb_hard, ninco]
ssb_hard:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt
ninco:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt
farood:
datasets: [inaturalist, textures, openimageo]
textures:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt
inaturalist:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt
openimageo:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt
csid:
datasets: [imagenetv2, imagenetc, imagenetr]
imagenetv2:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_v2.txt
imagenetc:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_c.txt
imagenetr:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_imagenet_r.txt
| 1,625 | 32.183673 | 73 | yml |
null | OpenOOD-main/configs/datasets/imagenet/imagenet_ood.yml | ood_dataset:
name: imagenet_ood
num_classes: 1000
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 32
shuffle: False
pre_size: 256
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, nearood, farood]
val:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/val_openimage_o.txt
nearood:
datasets: [ssb_hard, ninco]
ssb_hard:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt
ninco:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt
farood:
datasets: [inaturalist, textures, openimageo]
textures:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt
inaturalist:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt
openimageo:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt
| 1,170 | 29.815789 | 73 | yml |
null | OpenOOD-main/configs/datasets/imagenet200/imagenet200.yml | dataset:
name: imagenet200
num_classes: 200
pre_size: 256
image_size: 224
interpolation: bilinear
normalization_type: imagenet
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/train_imagenet200.txt
batch_size: 256
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/val_imagenet200.txt
batch_size: 256
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200.txt
batch_size: 256
shuffle: False
| 856 | 24.205882 | 75 | yml |
null | OpenOOD-main/configs/datasets/imagenet200/imagenet200_double_label.yml | dataset:
name: imagenet200_double_label
interpolation: bilinear
normalization_type: imagenet
split_names: [train, val, test]
num_classes: 206 # actually it's 200 classes but it has 6 groups
image_size: 224
pre_size: 256
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
train:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/train_imagenet200_mos.txt
batch_size: 256
shuffle: True
interpolation: bilinear
val:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/val_imagenet200_mos.txt
batch_size: 256
shuffle: False
interpolation: bilinear
test:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_mos.txt
batch_size: 256
shuffle: False
interpolation: bilinear
| 1,009 | 29.606061 | 79 | yml |
null | OpenOOD-main/configs/datasets/imagenet200/imagenet200_double_label_fsood.yml | ood_dataset:
name: imagenet200_fsood
num_classes: 200
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 256
shuffle: False
pre_size: 256
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, nearood, farood, csid]
val:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/val_openimage_o.txt
nearood:
datasets: [ssb_hard, ninco]
ssb_hard:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt
ninco:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt
farood:
datasets: [inaturalist, textures, openimageo]
textures:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt
inaturalist:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt
openimageo:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt
csid:
datasets: [imagenetv2, imagenetc, imagenetr]
imagenetv2:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_v2_mos.txt
imagenetc:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_c_mos.txt
imagenetr:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_r_mos.txt
| 1,663 | 32.959184 | 83 | yml |
null | OpenOOD-main/configs/datasets/imagenet200/imagenet200_fsood.yml | ood_dataset:
name: imagenet200_fsood
num_classes: 200
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 256
shuffle: False
pre_size: 256
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, nearood, farood, csid]
val:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/val_openimage_o.txt
nearood:
datasets: [ssb_hard, ninco]
ssb_hard:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt
ninco:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt
farood:
datasets: [inaturalist, textures, openimageo]
textures:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt
inaturalist:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt
openimageo:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt
csid:
datasets: [imagenetv2, imagenetc, imagenetr]
imagenetv2:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_v2.txt
imagenetc:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_c.txt
imagenetr:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/test_imagenet200_r.txt
| 1,651 | 32.714286 | 79 | yml |
null | OpenOOD-main/configs/datasets/imagenet200/imagenet200_oe.yml | name: imagenet200_oe
dataset:
name: imagenet200_oe
split_names: [train, oe, val, test]
oe:
dataset_class: ImglistDataset
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/train_imagenet800.txt
batch_size: 256
shuffle: True
interpolation: bilinear
| 314 | 23.230769 | 75 | yml |
null | OpenOOD-main/configs/datasets/imagenet200/imagenet200_ood.yml | ood_dataset:
name: imagenet200_ood
num_classes: 200
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 256
shuffle: False
pre_size: 256
image_size: 224
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [val, nearood, farood]
val:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet200/val_openimage_o.txt
nearood:
datasets: [ssb_hard, ninco]
ssb_hard:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ssb_hard.txt
ninco:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_ninco.txt
farood:
datasets: [inaturalist, textures, openimageo]
textures:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/imagenet/test_textures.txt
inaturalist:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_inaturalist.txt
openimageo:
data_dir: ./data/images_largescale/
imglist_pth: ./data/benchmark_imglist/imagenet/test_openimage_o.txt
| 1,176 | 29.973684 | 73 | yml |
null | OpenOOD-main/configs/datasets/mnist/mnist.yml | dataset:
name: mnist
num_classes: 10
image_size: 28
pre_size: 28
interpolation: bilinear
normalization_type: mnist
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/train_mnist.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/val_mnist.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_mnist.txt
batch_size: 200
shuffle: False
| 799 | 22.529412 | 63 | yml |
null | OpenOOD-main/configs/datasets/mnist/mnist_fsood.yml | ood_dataset:
name: mnist_fsood
num_classes: 10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, nearood, farood, csid]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/val_mnist.txt
nearood:
datasets: [notmnist, fashionmnist]
notmnist:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_notmnist.txt
fashionmnist:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_fashionmnist.txt
farood:
datasets: [texture, cifar10, tin, places365]
texture:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_texture.txt
cifar10:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_cifar10.txt
tin:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_tin.txt
places365:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_places365.txt
csid:
datasets: [svhn]
svhn:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_svhn.txt
| 1,336 | 29.386364 | 71 | yml |
null | OpenOOD-main/configs/datasets/mnist/mnist_ood.yml | ood_dataset:
name: mnist_ood
num_classes: 10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, nearood, farood]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/val_notmnist.txt
nearood:
datasets: [notmnist, fashionmnist]
notmnist:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_notmnist.txt
fashionmnist:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_fashionmnist.txt
farood:
datasets: [texture, cifar10, tin, places365]
texture:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_texture.txt
cifar10:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_cifar10.txt
tin:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_tin.txt
places365:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/mnist/test_places365.txt
| 1,189 | 29.512821 | 71 | yml |
null | OpenOOD-main/configs/datasets/mvtec/bottle.yml | dataset:
name: bottle
num_classes: 2
pre_size: 256
image_size: 256
interpolation: bilinear
normalization_type: cifar10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, test, val]
train:
dataset_class: ImglistDataset
data_dir: ./data/images/
interpolation: bilinear
imglist_pth: ./data/benchmark_imglist/mvtecList/bottle_train_good.txt
batch_size: 2
shuffle: True
test:
dataset_class: ImglistDataset
data_dir: ./data/images/
interpolation: bilinear
imglist_pth: ./data/benchmark_imglist/mvtecList/bottle_test_id.txt
batch_size: 1
shuffle: False
val:
dataset_class: ImglistDataset
data_dir: ./data/images/
interpolation: bilinear
imglist_pth: ./data/benchmark_imglist/mvtecList/bottle_test_id.txt
batch_size: 1
shuffle: False
ood_dataset:
name: bottle_ood
num_classes: 2
image_size: 256
num_workers: 4
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 1
shuffle: False
split_names: [val]
val:
data_dir: ./data/images/
imglist_pth: ./data/benchmark_imglist/mvtecList/bottle_test.txt
| 1,187 | 21.415094 | 73 | yml |
null | OpenOOD-main/configs/datasets/mvtec/cable.yml | dataset:
name: cable
num_classes: 2
pre_size: 256
image_size: 256
interpolation: bilinear
normalization_type: cifar10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, test, val]
train:
dataset_class: ImglistDataset
data_dir: ./data/images/
interpolation: bilinear
imglist_pth: ./data/benchmark_imglist/mvtecList/cable_train_good.txt
batch_size: 2
shuffle: True
test:
dataset_class: ImglistDataset
data_dir: ./data/images/
interpolation: bilinear
imglist_pth: ./data/benchmark_imglist/mvtecList/cable_test_id.txt
batch_size: 1
shuffle: False
val:
dataset_class: ImglistDataset
data_dir: ./data/images/
interpolation: bilinear
imglist_pth: ./data/benchmark_imglist/mvtecList/cable_test_id.txt
batch_size: 1
shuffle: False
ood_dataset:
name: cable_ood
num_classes: 2
image_size: 256
num_workers: 4
dataset_class: ImglistDataset
interpolation: bilinear
batch_size: 1
shuffle: False
split_names: [val]
val:
data_dir: ./data/images/
imglist_pth: ./data/benchmark_imglist/mvtecList/cable_test.txt
| 1,181 | 21.301887 | 72 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed1.yml | dataset:
name: cifar50_seed1
num_classes: 50
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed1.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed1.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed1.txt
batch_size: 200
shuffle: False
| 882 | 24.970588 | 88 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed1_osr.yml | ood_dataset:
name: cifar50_seed1_osr
num_classes: 50
pre_size: 32
image_size: 32
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, osr]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed1.txt
osr:
datasets: [cifar50]
cifar50:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed1.txt
| 585 | 23.416667 | 91 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed2.yml | dataset:
name: cifar50_seed2
num_classes: 50
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed2.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed2.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed2.txt
batch_size: 200
shuffle: False
| 882 | 24.970588 | 88 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed2_osr.yml | ood_dataset:
name: cifar50_seed2_osr
num_classes: 50
pre_size: 32
image_size: 32
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, osr]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed2.txt
osr:
datasets: [cifar50]
cifar50:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed2.txt
| 585 | 23.416667 | 91 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed3.yml | dataset:
name: cifar50_seed3
num_classes: 50
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed3.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed3.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed3.txt
batch_size: 200
shuffle: False
| 882 | 24.970588 | 88 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed3_osr.yml | ood_dataset:
name: cifar50_seed3_osr
num_classes: 50
pre_size: 32
image_size: 32
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, osr]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed3.txt
osr:
datasets: [cifar50]
cifar50:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed3.txt
| 585 | 23.416667 | 91 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed4.yml | dataset:
name: cifar50_seed4
num_classes: 50
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed4.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed4.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed4.txt
batch_size: 200
shuffle: False
| 882 | 24.970588 | 88 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed4_osr.yml | ood_dataset:
name: cifar50_seed4_osr
num_classes: 50
pre_size: 32
image_size: 32
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, osr]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed4.txt
osr:
datasets: [cifar50]
cifar50:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed4.txt
| 585 | 23.416667 | 91 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed5.yml | dataset:
name: cifar50_seed5
num_classes: 50
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar100
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/train/train_cifar100_50_seed5.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/val/val_cifar100_50_seed5.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed5.txt
batch_size: 200
shuffle: False
| 882 | 24.970588 | 88 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar50/cifar50_seed5_osr.yml | ood_dataset:
name: cifar50_seed5_osr
num_classes: 50
pre_size: 32
image_size: 32
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, osr]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_id_seed5.txt
osr:
datasets: [cifar50]
cifar50:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar50/test/test_cifar100_50_ood_seed5.txt
| 585 | 23.416667 | 91 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar6/cifar6_seed1.yml | dataset:
name: cifar6_seed1
num_classes: 6
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar6/train/train_cifar10_6_seed1.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar6/val/val_cifar10_6_seed1.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed1.txt
batch_size: 200
shuffle: False
| 870 | 24.617647 | 85 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar6/cifar6_seed1_osr.yml | ood_dataset:
name: cifar6_seed1_osr
num_classes: 6
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
dataset_class: ImglistDataset
batch_size: 128
shuffle: False
split_names: [val, osr]
val:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed1.txt
osr:
datasets: [cifar4]
cifar4:
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_4_ood_seed1.txt
| 543 | 23.727273 | 88 | yml |
null | OpenOOD-main/configs/datasets/osr_cifar6/cifar6_seed2.yml | dataset:
name: cifar6_seed2
num_classes: 6
pre_size: 32
image_size: 32
interpolation: bilinear
normalization_type: cifar10
num_workers: '@{num_workers}'
num_gpus: '@{num_gpus}'
num_machines: '@{num_machines}'
split_names: [train, val, test]
train:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar6/train/train_cifar10_6_seed2.txt
batch_size: 128
shuffle: True
val:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar6/val/val_cifar10_6_seed2.txt
batch_size: 200
shuffle: False
test:
dataset_class: ImglistDataset
data_dir: ./data/images_classic/
imglist_pth: ./data/benchmark_imglist/osr_cifar6/test/test_cifar10_6_id_seed2.txt
batch_size: 200
shuffle: False
| 870 | 24.617647 | 85 | yml |