mirror of
https://gitlab.com/libeigen/eigen.git
synced 2026-04-10 11:34:33 +08:00
Compare commits
4 Commits
gpu-dense-
...
selfadjoin
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8346cc3410 | ||
|
|
93e9970964 | ||
|
|
3eed3b0ab9 | ||
|
|
8ddbe44799 |
@@ -451,6 +451,13 @@ EIGEN_DEVICE_FUNC SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<Mat
|
||||
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
|
||||
mat = matrix.template triangularView<Lower>();
|
||||
RealScalar scale = mat.cwiseAbs().maxCoeff();
|
||||
if (!(numext::isfinite)(scale)) {
|
||||
// Input contains Inf or NaN.
|
||||
m_info = NoConvergence;
|
||||
m_isInitialized = true;
|
||||
m_eigenvectorsOk = false;
|
||||
return *this;
|
||||
}
|
||||
if (numext::is_exactly_zero(scale)) scale = RealScalar(1);
|
||||
mat.template triangularView<Lower>() /= scale;
|
||||
m_subdiag.resize(n - 1);
|
||||
@@ -470,16 +477,36 @@ EIGEN_DEVICE_FUNC SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<Mat
|
||||
template <typename MatrixType>
|
||||
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::computeFromTridiagonal(
|
||||
const RealVectorType& diag, const SubDiagonalType& subdiag, int options) {
|
||||
// TODO : Add an option to scale the values beforehand
|
||||
bool computeEigenvectors = (options & ComputeEigenvectors) == ComputeEigenvectors;
|
||||
|
||||
m_eivalues = diag;
|
||||
m_subdiag = subdiag;
|
||||
|
||||
// Scale the tridiagonal matrix to [-1:1] to avoid over- and underflow,
|
||||
// just like compute() does for the full matrix.
|
||||
RealScalar scale = m_eivalues.cwiseAbs().maxCoeff();
|
||||
if (m_subdiag.size() > 0) scale = numext::maxi(scale, m_subdiag.cwiseAbs().maxCoeff());
|
||||
if (!(numext::isfinite)(scale)) {
|
||||
m_info = NoConvergence;
|
||||
m_isInitialized = true;
|
||||
m_eigenvectorsOk = false;
|
||||
return *this;
|
||||
}
|
||||
if (numext::is_exactly_zero(scale)) scale = RealScalar(1);
|
||||
const bool needsScaling = scale != RealScalar(1);
|
||||
if (needsScaling) {
|
||||
m_eivalues /= scale;
|
||||
m_subdiag /= scale;
|
||||
}
|
||||
|
||||
if (computeEigenvectors) {
|
||||
m_eivec.setIdentity(diag.size(), diag.size());
|
||||
}
|
||||
m_info = internal::computeFromTridiagonal_impl(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
|
||||
|
||||
// Scale back the eigenvalues.
|
||||
if (needsScaling) m_eivalues *= scale;
|
||||
|
||||
m_isInitialized = true;
|
||||
m_eigenvectorsOk = computeEigenvectors;
|
||||
return *this;
|
||||
@@ -662,6 +689,28 @@ struct direct_selfadjoint_eigenvalues<SolverType, 3, false> {
|
||||
// compute the eigenvalues
|
||||
computeRoots(scaledMat, eivals);
|
||||
|
||||
// computeRoots produces theoretically sorted roots, but floating-point
|
||||
// rounding in the trigonometric formulas can break the ordering.
|
||||
// Enforce sorting with a three-element compare-and-swap sorting network.
|
||||
{
|
||||
Scalar tmp;
|
||||
if (eivals(0) > eivals(1)) {
|
||||
tmp = eivals(0);
|
||||
eivals(0) = eivals(1);
|
||||
eivals(1) = tmp;
|
||||
}
|
||||
if (eivals(1) > eivals(2)) {
|
||||
tmp = eivals(1);
|
||||
eivals(1) = eivals(2);
|
||||
eivals(2) = tmp;
|
||||
}
|
||||
if (eivals(0) > eivals(1)) {
|
||||
tmp = eivals(0);
|
||||
eivals(0) = eivals(1);
|
||||
eivals(1) = tmp;
|
||||
}
|
||||
}
|
||||
|
||||
// compute the eigenvectors
|
||||
if (computeEigenvectors) {
|
||||
if ((eivals(2) - eivals(0)) <= Eigen::NumTraits<Scalar>::epsilon()) {
|
||||
@@ -691,7 +740,7 @@ struct direct_selfadjoint_eigenvalues<SolverType, 3, false> {
|
||||
if (d0 <= 2 * Eigen::NumTraits<Scalar>::epsilon() * d1) {
|
||||
// If d0 is too small, then the two other eigenvalues are numerically the same,
|
||||
// and thus we only have to ortho-normalize the near orthogonal vector we saved above.
|
||||
eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l)) * eivecs.col(l);
|
||||
eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l)) * eivecs.col(k);
|
||||
eivecs.col(l).normalize();
|
||||
} else {
|
||||
tmp = scaledMat;
|
||||
|
||||
@@ -21,3 +21,4 @@ eigen_add_benchmark(bench_syr2 bench_syr2.cpp)
|
||||
eigen_add_benchmark(bench_construction bench_construction.cpp)
|
||||
eigen_add_benchmark(bench_fixed_size bench_fixed_size.cpp)
|
||||
eigen_add_benchmark(bench_fixed_size_double bench_fixed_size.cpp DEFINITIONS SCALAR=double)
|
||||
eigen_add_benchmark(bench_small_matrix bench_small_matrix.cpp)
|
||||
|
||||
316
benchmarks/Core/bench_small_matrix.cpp
Normal file
316
benchmarks/Core/bench_small_matrix.cpp
Normal file
@@ -0,0 +1,316 @@
|
||||
#include <benchmark/benchmark.h>
|
||||
#include <Eigen/Core>
|
||||
#include <Eigen/LU>
|
||||
#include <Eigen/Cholesky>
|
||||
#include <Eigen/QR>
|
||||
#include <Eigen/SVD>
|
||||
#include <Eigen/Eigenvalues>
|
||||
|
||||
using namespace Eigen;
|
||||
|
||||
// ============================================================================
|
||||
// Fixed-size matrix multiply (the fundamental operation)
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_MatMul(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a, b, c;
|
||||
a.setRandom();
|
||||
b.setRandom();
|
||||
for (auto _ : state) {
|
||||
c.noalias() = a * b;
|
||||
benchmark::DoNotOptimize(c.data());
|
||||
}
|
||||
}
|
||||
|
||||
// Matrix-vector multiply
|
||||
template <typename Scalar, int N>
|
||||
static void BM_MatVec(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
Matrix<Scalar, N, 1> v, r;
|
||||
a.setRandom();
|
||||
v.setRandom();
|
||||
for (auto _ : state) {
|
||||
r.noalias() = a * v;
|
||||
benchmark::DoNotOptimize(r.data());
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Fixed-size inverse (critical for transform operations)
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int N>
|
||||
EIGEN_DONT_INLINE void do_inverse(const Matrix<Scalar, N, N>& a, Matrix<Scalar, N, N>& r) {
|
||||
r = a.inverse();
|
||||
}
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_Inverse(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a, r;
|
||||
a.setRandom();
|
||||
a += Matrix<Scalar, N, N>::Identity() * Scalar(N); // ensure well-conditioned
|
||||
for (auto _ : state) {
|
||||
do_inverse(a, r);
|
||||
benchmark::DoNotOptimize(r.data());
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Fixed-size determinant
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_Determinant(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
a.setRandom();
|
||||
Scalar d;
|
||||
for (auto _ : state) {
|
||||
d = a.determinant();
|
||||
benchmark::DoNotOptimize(d);
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// LLT (Cholesky) — for SPD matrices (covariance, mass matrices)
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_LLT_Compute(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
a.setRandom();
|
||||
a = a.transpose() * a + Matrix<Scalar, N, N>::Identity(); // SPD
|
||||
LLT<Matrix<Scalar, N, N>> llt;
|
||||
for (auto _ : state) {
|
||||
llt.compute(a);
|
||||
benchmark::DoNotOptimize(&llt);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_LLT_Solve(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
a.setRandom();
|
||||
a = a.transpose() * a + Matrix<Scalar, N, N>::Identity();
|
||||
Matrix<Scalar, N, 1> b = Matrix<Scalar, N, 1>::Random();
|
||||
LLT<Matrix<Scalar, N, N>> llt(a);
|
||||
Matrix<Scalar, N, 1> x;
|
||||
for (auto _ : state) {
|
||||
x = llt.solve(b);
|
||||
benchmark::DoNotOptimize(x.data());
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// LDLT — for semi-definite matrices
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_LDLT_Compute(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
a.setRandom();
|
||||
a = a.transpose() * a + Matrix<Scalar, N, N>::Identity();
|
||||
LDLT<Matrix<Scalar, N, N>> ldlt;
|
||||
for (auto _ : state) {
|
||||
ldlt.compute(a);
|
||||
benchmark::DoNotOptimize(&ldlt);
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PartialPivLU — for general square systems
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_PartialPivLU_Compute(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
a.setRandom();
|
||||
a += Matrix<Scalar, N, N>::Identity() * Scalar(N);
|
||||
PartialPivLU<Matrix<Scalar, N, N>> lu;
|
||||
for (auto _ : state) {
|
||||
lu.compute(a);
|
||||
benchmark::DoNotOptimize(lu.matrixLU().data());
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_PartialPivLU_Solve(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
a.setRandom();
|
||||
a += Matrix<Scalar, N, N>::Identity() * Scalar(N);
|
||||
Matrix<Scalar, N, 1> b = Matrix<Scalar, N, 1>::Random();
|
||||
PartialPivLU<Matrix<Scalar, N, N>> lu(a);
|
||||
Matrix<Scalar, N, 1> x;
|
||||
for (auto _ : state) {
|
||||
x = lu.solve(b);
|
||||
benchmark::DoNotOptimize(x.data());
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// ColPivHouseholderQR — for least-squares (camera calibration, etc.)
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int Rows, int Cols>
|
||||
static void BM_ColPivQR_Compute(benchmark::State& state) {
|
||||
Matrix<Scalar, Rows, Cols> a;
|
||||
a.setRandom();
|
||||
ColPivHouseholderQR<Matrix<Scalar, Rows, Cols>> qr;
|
||||
for (auto _ : state) {
|
||||
qr.compute(a);
|
||||
benchmark::DoNotOptimize(qr.matrixR().data());
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// JacobiSVD — the workhorse for small matrices in CV
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int Rows, int Cols, int Options = ComputeThinU | ComputeThinV>
|
||||
static void BM_JacobiSVD_Compute(benchmark::State& state) {
|
||||
Matrix<Scalar, Rows, Cols> a;
|
||||
a.setRandom();
|
||||
JacobiSVD<Matrix<Scalar, Rows, Cols>, Options> svd;
|
||||
for (auto _ : state) {
|
||||
svd.compute(a);
|
||||
benchmark::DoNotOptimize(svd.singularValues().data());
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Scalar, int Rows, int Cols>
|
||||
static void BM_JacobiSVD_Solve(benchmark::State& state) {
|
||||
Matrix<Scalar, Rows, Cols> a;
|
||||
a.setRandom();
|
||||
Matrix<Scalar, Rows, 1> b = Matrix<Scalar, Rows, 1>::Random();
|
||||
JacobiSVD<Matrix<Scalar, Rows, Cols>, ComputeThinU | ComputeThinV> svd(a);
|
||||
Matrix<Scalar, Cols, 1> x;
|
||||
for (auto _ : state) {
|
||||
x = svd.solve(b);
|
||||
benchmark::DoNotOptimize(x.data());
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// SelfAdjointEigenSolver — PCA, normal estimation
|
||||
// ============================================================================
|
||||
|
||||
template <typename Scalar, int N>
|
||||
static void BM_SelfAdjointEig_Compute(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
a.setRandom();
|
||||
a = a.transpose() * a;
|
||||
SelfAdjointEigenSolver<Matrix<Scalar, N, N>> eig;
|
||||
for (auto _ : state) {
|
||||
eig.compute(a);
|
||||
benchmark::DoNotOptimize(eig.eigenvalues().data());
|
||||
}
|
||||
}
|
||||
|
||||
// SelfAdjointEigenSolver::computeDirect — closed-form for 2x2 and 3x3
|
||||
template <typename Scalar, int N>
|
||||
static void BM_SelfAdjointEig_ComputeDirect(benchmark::State& state) {
|
||||
Matrix<Scalar, N, N> a;
|
||||
a.setRandom();
|
||||
a = a.transpose() * a;
|
||||
SelfAdjointEigenSolver<Matrix<Scalar, N, N>> eig;
|
||||
for (auto _ : state) {
|
||||
eig.computeDirect(a);
|
||||
benchmark::DoNotOptimize(eig.eigenvalues().data());
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Registration — focus on robotics/CV sizes
|
||||
// ============================================================================
|
||||
|
||||
// Matrix multiply: 2x2, 3x3, 4x4, 6x6
|
||||
BENCHMARK(BM_MatMul<float, 2>);
|
||||
BENCHMARK(BM_MatMul<float, 3>);
|
||||
BENCHMARK(BM_MatMul<float, 4>);
|
||||
BENCHMARK(BM_MatMul<float, 6>);
|
||||
BENCHMARK(BM_MatMul<double, 2>);
|
||||
BENCHMARK(BM_MatMul<double, 3>);
|
||||
BENCHMARK(BM_MatMul<double, 4>);
|
||||
BENCHMARK(BM_MatMul<double, 6>);
|
||||
|
||||
// Matrix-vector multiply
|
||||
BENCHMARK(BM_MatVec<float, 3>);
|
||||
BENCHMARK(BM_MatVec<float, 4>);
|
||||
BENCHMARK(BM_MatVec<float, 6>);
|
||||
BENCHMARK(BM_MatVec<double, 3>);
|
||||
BENCHMARK(BM_MatVec<double, 4>);
|
||||
BENCHMARK(BM_MatVec<double, 6>);
|
||||
|
||||
// Inverse
|
||||
BENCHMARK(BM_Inverse<float, 2>);
|
||||
BENCHMARK(BM_Inverse<float, 3>);
|
||||
BENCHMARK(BM_Inverse<float, 4>);
|
||||
BENCHMARK(BM_Inverse<double, 2>);
|
||||
BENCHMARK(BM_Inverse<double, 3>);
|
||||
BENCHMARK(BM_Inverse<double, 4>);
|
||||
|
||||
// Determinant
|
||||
BENCHMARK(BM_Determinant<float, 2>);
|
||||
BENCHMARK(BM_Determinant<float, 3>);
|
||||
BENCHMARK(BM_Determinant<float, 4>);
|
||||
BENCHMARK(BM_Determinant<double, 2>);
|
||||
BENCHMARK(BM_Determinant<double, 3>);
|
||||
BENCHMARK(BM_Determinant<double, 4>);
|
||||
|
||||
// LLT (Cholesky)
|
||||
BENCHMARK(BM_LLT_Compute<float, 3>);
|
||||
BENCHMARK(BM_LLT_Compute<float, 4>);
|
||||
BENCHMARK(BM_LLT_Compute<float, 6>);
|
||||
BENCHMARK(BM_LLT_Compute<double, 3>);
|
||||
BENCHMARK(BM_LLT_Compute<double, 4>);
|
||||
BENCHMARK(BM_LLT_Compute<double, 6>);
|
||||
BENCHMARK(BM_LLT_Solve<double, 3>);
|
||||
BENCHMARK(BM_LLT_Solve<double, 6>);
|
||||
|
||||
// LDLT
|
||||
BENCHMARK(BM_LDLT_Compute<double, 3>);
|
||||
BENCHMARK(BM_LDLT_Compute<double, 6>);
|
||||
|
||||
// PartialPivLU
|
||||
BENCHMARK(BM_PartialPivLU_Compute<float, 3>);
|
||||
BENCHMARK(BM_PartialPivLU_Compute<float, 4>);
|
||||
BENCHMARK(BM_PartialPivLU_Compute<double, 3>);
|
||||
BENCHMARK(BM_PartialPivLU_Compute<double, 4>);
|
||||
BENCHMARK(BM_PartialPivLU_Solve<double, 3>);
|
||||
BENCHMARK(BM_PartialPivLU_Solve<double, 4>);
|
||||
|
||||
// ColPivHouseholderQR
|
||||
BENCHMARK(BM_ColPivQR_Compute<float, 3, 3>);
|
||||
BENCHMARK(BM_ColPivQR_Compute<double, 3, 3>);
|
||||
BENCHMARK(BM_ColPivQR_Compute<double, 6, 6>);
|
||||
BENCHMARK(BM_ColPivQR_Compute<double, 8, 3>); // overdetermined least-squares
|
||||
|
||||
// JacobiSVD — the key CV sizes
|
||||
BENCHMARK(BM_JacobiSVD_Compute<float, 2, 2>);
|
||||
BENCHMARK(BM_JacobiSVD_Compute<float, 3, 3>);
|
||||
BENCHMARK(BM_JacobiSVD_Compute<float, 4, 4>);
|
||||
BENCHMARK(BM_JacobiSVD_Compute<double, 2, 2>);
|
||||
BENCHMARK(BM_JacobiSVD_Compute<double, 3, 3>);
|
||||
BENCHMARK(BM_JacobiSVD_Compute<double, 4, 4>);
|
||||
BENCHMARK(BM_JacobiSVD_Compute<double, 3, 4>); // projection matrix
|
||||
BENCHMARK(BM_JacobiSVD_Compute<double, 6, 6>); // manipulator Jacobian
|
||||
BENCHMARK(BM_JacobiSVD_Compute<double, 8, 9>); // fundamental matrix (8-point)
|
||||
BENCHMARK(BM_JacobiSVD_Solve<double, 3, 3>);
|
||||
BENCHMARK(BM_JacobiSVD_Solve<double, 6, 6>);
|
||||
|
||||
// Values-only SVD (when you just need singular values)
|
||||
BENCHMARK((BM_JacobiSVD_Compute<double, 3, 3, 0>));
|
||||
BENCHMARK((BM_JacobiSVD_Compute<double, 6, 6, 0>));
|
||||
|
||||
// SelfAdjointEigenSolver — PCA, normal estimation
|
||||
BENCHMARK(BM_SelfAdjointEig_Compute<float, 3>);
|
||||
BENCHMARK(BM_SelfAdjointEig_Compute<float, 4>);
|
||||
BENCHMARK(BM_SelfAdjointEig_Compute<double, 3>);
|
||||
BENCHMARK(BM_SelfAdjointEig_Compute<double, 4>);
|
||||
BENCHMARK(BM_SelfAdjointEig_Compute<double, 6>);
|
||||
|
||||
// SelfAdjointEigenSolver::computeDirect (closed-form, 2x2 and 3x3 only)
|
||||
BENCHMARK(BM_SelfAdjointEig_ComputeDirect<float, 2>);
|
||||
BENCHMARK(BM_SelfAdjointEig_ComputeDirect<float, 3>);
|
||||
BENCHMARK(BM_SelfAdjointEig_ComputeDirect<double, 2>);
|
||||
BENCHMARK(BM_SelfAdjointEig_ComputeDirect<double, 3>);
|
||||
@@ -25,8 +25,8 @@ void selfadjointeigensolver_essential_check(const MatrixType& m) {
|
||||
SelfAdjointEigenSolver<MatrixType> eiSymm(m);
|
||||
VERIFY_IS_EQUAL(eiSymm.info(), Success);
|
||||
|
||||
Index n = m.cols();
|
||||
RealScalar scaling = m.cwiseAbs().maxCoeff();
|
||||
RealScalar unitary_error_factor = RealScalar(32);
|
||||
|
||||
if (scaling < (std::numeric_limits<RealScalar>::min)()) {
|
||||
VERIFY(eiSymm.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits<RealScalar>::min)());
|
||||
@@ -35,7 +35,18 @@ void selfadjointeigensolver_essential_check(const MatrixType& m) {
|
||||
(eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal()) / scaling);
|
||||
}
|
||||
VERIFY_IS_APPROX(m.template selfadjointView<Lower>().eigenvalues(), eiSymm.eigenvalues());
|
||||
VERIFY(eiSymm.eigenvectors().isUnitary(test_precision<RealScalar>() * unitary_error_factor));
|
||||
|
||||
// Eigenvectors must be unitary. Use a tolerance proportional to n*epsilon,
|
||||
// which is the expected rounding error for Householder-based orthogonal transformations.
|
||||
RealScalar unitary_tol = RealScalar(4) * RealScalar(numext::maxi(Index(1), n)) * NumTraits<RealScalar>::epsilon();
|
||||
// But don't go below the test_precision floor (matters for float).
|
||||
unitary_tol = numext::maxi(unitary_tol, test_precision<RealScalar>());
|
||||
VERIFY(eiSymm.eigenvectors().isUnitary(unitary_tol));
|
||||
|
||||
// Verify eigenvalues are sorted in non-decreasing order.
|
||||
for (Index i = 1; i < n; ++i) {
|
||||
VERIFY(eiSymm.eigenvalues()(i) >= eiSymm.eigenvalues()(i - 1));
|
||||
}
|
||||
|
||||
if (m.cols() <= 4) {
|
||||
SelfAdjointEigenSolver<MatrixType> eiDirect;
|
||||
@@ -58,7 +69,13 @@ void selfadjointeigensolver_essential_check(const MatrixType& m) {
|
||||
VERIFY_IS_APPROX(m.template selfadjointView<Lower>().eigenvalues() / scaling, eiDirect.eigenvalues() / scaling);
|
||||
}
|
||||
|
||||
VERIFY(eiDirect.eigenvectors().isUnitary(test_precision<RealScalar>() * unitary_error_factor));
|
||||
// Direct solver eigenvectors must also be unitary.
|
||||
VERIFY(eiDirect.eigenvectors().isUnitary(unitary_tol));
|
||||
|
||||
// Direct solver eigenvalues must also be sorted.
|
||||
for (Index i = 1; i < n; ++i) {
|
||||
VERIFY(eiDirect.eigenvalues()(i) >= eiDirect.eigenvalues()(i - 1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -149,9 +166,15 @@ void selfadjointeigensolver(const MatrixType& m) {
|
||||
VERIFY_IS_APPROX(tridiag.diagonal(), tridiag.matrixT().diagonal());
|
||||
VERIFY_IS_APPROX(tridiag.subDiagonal(), tridiag.matrixT().template diagonal<-1>());
|
||||
Matrix<RealScalar, Dynamic, Dynamic> T = tridiag.matrixT();
|
||||
if (rows > 1 && cols > 1) {
|
||||
// FIXME check that upper and lower part are 0:
|
||||
// VERIFY(T.topRightCorner(rows-2, cols-2).template triangularView<Upper>().isZero());
|
||||
if (rows > 2) {
|
||||
// Verify that the tridiagonal matrix is actually tridiagonal (zero outside the three central diagonals).
|
||||
for (Index i = 0; i < rows; ++i) {
|
||||
for (Index j = 0; j < cols; ++j) {
|
||||
if (numext::abs(i - j) > 1) {
|
||||
VERIFY(numext::is_exactly_zero(T(i, j)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
VERIFY_IS_APPROX(tridiag.diagonal(), T.diagonal());
|
||||
VERIFY_IS_APPROX(tridiag.subDiagonal(), T.template diagonal<1>());
|
||||
@@ -170,7 +193,7 @@ void selfadjointeigensolver(const MatrixType& m) {
|
||||
eiSymmTridiag.eigenvectors().real().transpose());
|
||||
}
|
||||
|
||||
// Test matrix expponential from eigendecomposition.
|
||||
// Test matrix exponential from eigendecomposition.
|
||||
// First scale to avoid overflow.
|
||||
symmB = symmB / symmB.norm();
|
||||
eiSymm.compute(symmB);
|
||||
@@ -202,6 +225,320 @@ void selfadjointeigensolver(const MatrixType& m) {
|
||||
}
|
||||
}
|
||||
|
||||
// Test matrices with exact eigenvalue multiplicities.
|
||||
template <typename MatrixType>
|
||||
void selfadjointeigensolver_repeated_eigenvalues(const MatrixType& m) {
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
Index n = m.rows();
|
||||
if (n < 2) return;
|
||||
|
||||
// Create a random unitary matrix via QR.
|
||||
MatrixType q = MatrixType::Random(n, n);
|
||||
HouseholderQR<MatrixType> qr(q);
|
||||
q = qr.householderQ();
|
||||
|
||||
// All eigenvalues equal (scalar multiple of identity).
|
||||
{
|
||||
RealScalar lambda = internal::random<RealScalar>(-10, 10);
|
||||
MatrixType A = lambda * MatrixType::Identity(n, n);
|
||||
selfadjointeigensolver_essential_check(A);
|
||||
}
|
||||
|
||||
// Eigenvalue of multiplicity n-1 (one distinct, rest equal).
|
||||
{
|
||||
Matrix<RealScalar, Dynamic, 1> d = Matrix<RealScalar, Dynamic, 1>::Constant(n, RealScalar(3));
|
||||
d(0) = RealScalar(-2);
|
||||
MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
|
||||
A.template triangularView<StrictlyUpper>().setZero();
|
||||
selfadjointeigensolver_essential_check(A);
|
||||
}
|
||||
|
||||
// Two clusters: first half one value, second half another.
|
||||
if (n >= 4) {
|
||||
Matrix<RealScalar, Dynamic, 1> d(n);
|
||||
for (Index i = 0; i < n / 2; ++i) d(i) = RealScalar(1);
|
||||
for (Index i = n / 2; i < n; ++i) d(i) = RealScalar(5);
|
||||
MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
|
||||
A.template triangularView<StrictlyUpper>().setZero();
|
||||
selfadjointeigensolver_essential_check(A);
|
||||
}
|
||||
|
||||
// Nearly repeated eigenvalues: separated by O(epsilon).
|
||||
{
|
||||
Matrix<RealScalar, Dynamic, 1> d(n);
|
||||
for (Index i = 0; i < n; ++i) {
|
||||
d(i) = RealScalar(1) + RealScalar(i) * NumTraits<RealScalar>::epsilon() * RealScalar(10);
|
||||
}
|
||||
MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
|
||||
A.template triangularView<StrictlyUpper>().setZero();
|
||||
selfadjointeigensolver_essential_check(A);
|
||||
}
|
||||
}
|
||||
|
||||
// Test matrices with extreme condition numbers and eigenvalue ranges.
|
||||
template <typename MatrixType>
|
||||
void selfadjointeigensolver_extreme_eigenvalues(const MatrixType& m) {
|
||||
using std::pow;
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
Index n = m.rows();
|
||||
if (n < 2) return;
|
||||
|
||||
// Create a random unitary matrix.
|
||||
MatrixType q = MatrixType::Random(n, n);
|
||||
HouseholderQR<MatrixType> qr(q);
|
||||
q = qr.householderQ();
|
||||
|
||||
// Eigenvalues spanning many orders of magnitude (high condition number).
|
||||
{
|
||||
RealScalar maxExp = RealScalar(std::numeric_limits<RealScalar>::max_exponent10) / RealScalar(4);
|
||||
Matrix<RealScalar, Dynamic, 1> d(n);
|
||||
for (Index i = 0; i < n; ++i) {
|
||||
RealScalar exponent = -maxExp + RealScalar(2) * maxExp * RealScalar(i) / RealScalar(n - 1);
|
||||
d(i) = pow(RealScalar(10), exponent);
|
||||
}
|
||||
MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
|
||||
A.template triangularView<StrictlyUpper>().setZero();
|
||||
|
||||
SelfAdjointEigenSolver<MatrixType> eig(A);
|
||||
VERIFY_IS_EQUAL(eig.info(), Success);
|
||||
// For ill-conditioned matrices we can only check the relative residual.
|
||||
// ||A*V - V*D|| / ||A|| should be O(n * epsilon).
|
||||
RealScalar Anorm = A.template selfadjointView<Lower>().operatorNorm();
|
||||
if (Anorm > (std::numeric_limits<RealScalar>::min)()) {
|
||||
MatrixType residual = A.template selfadjointView<Lower>() * eig.eigenvectors() -
|
||||
eig.eigenvectors() * eig.eigenvalues().asDiagonal();
|
||||
RealScalar rel_err = residual.norm() / Anorm;
|
||||
RealScalar tol = RealScalar(4) * RealScalar(n) * NumTraits<RealScalar>::epsilon();
|
||||
VERIFY(rel_err <= tol);
|
||||
}
|
||||
// Eigenvalues must still be sorted.
|
||||
for (Index i = 1; i < n; ++i) {
|
||||
VERIFY(eig.eigenvalues()(i) >= eig.eigenvalues()(i - 1));
|
||||
}
|
||||
}
|
||||
|
||||
// Very tiny eigenvalues (near underflow).
|
||||
{
|
||||
RealScalar tiny = (std::numeric_limits<RealScalar>::min)() * RealScalar(100);
|
||||
Matrix<RealScalar, Dynamic, 1> d(n);
|
||||
for (Index i = 0; i < n; ++i) {
|
||||
d(i) = tiny * (RealScalar(1) + RealScalar(i));
|
||||
}
|
||||
MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
|
||||
A.template triangularView<StrictlyUpper>().setZero();
|
||||
selfadjointeigensolver_essential_check(A);
|
||||
}
|
||||
|
||||
// Very large eigenvalues (near overflow).
|
||||
{
|
||||
RealScalar huge = (std::numeric_limits<RealScalar>::max)() / (RealScalar(n) * RealScalar(100));
|
||||
Matrix<RealScalar, Dynamic, 1> d(n);
|
||||
for (Index i = 0; i < n; ++i) {
|
||||
d(i) = huge * (RealScalar(1) + RealScalar(i) * RealScalar(0.01));
|
||||
}
|
||||
MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
|
||||
A.template triangularView<StrictlyUpper>().setZero();
|
||||
selfadjointeigensolver_essential_check(A);
|
||||
}
|
||||
|
||||
// Mix of positive and negative eigenvalues.
|
||||
{
|
||||
Matrix<RealScalar, Dynamic, 1> d(n);
|
||||
for (Index i = 0; i < n; ++i) {
|
||||
d(i) = (i % 2 == 0) ? RealScalar(i + 1) : RealScalar(-(i + 1));
|
||||
}
|
||||
MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
|
||||
A.template triangularView<StrictlyUpper>().setZero();
|
||||
selfadjointeigensolver_essential_check(A);
|
||||
}
|
||||
|
||||
// One zero eigenvalue among non-zero ones (rank-deficient).
|
||||
{
|
||||
Matrix<RealScalar, Dynamic, 1> d = Matrix<RealScalar, Dynamic, 1>::LinSpaced(n, RealScalar(0), RealScalar(n - 1));
|
||||
MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
|
||||
A.template triangularView<StrictlyUpper>().setZero();
|
||||
selfadjointeigensolver_essential_check(A);
|
||||
}
|
||||
}
|
||||
|
||||
// Test computeFromTridiagonal with scaled inputs (regression for missing scaling).
|
||||
template <typename MatrixType>
|
||||
void selfadjointeigensolver_tridiagonal_scaled(const MatrixType& m) {
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
Index n = m.rows();
|
||||
if (n < 2) return;
|
||||
|
||||
// Create a tridiagonal matrix with large entries.
|
||||
typedef Matrix<RealScalar, Dynamic, 1> RealVectorType;
|
||||
RealVectorType diag(n), subdiag(n - 1);
|
||||
|
||||
// Case 1: Large values.
|
||||
RealScalar scale = (std::numeric_limits<RealScalar>::max)() / (RealScalar(n) * RealScalar(100));
|
||||
for (Index i = 0; i < n; ++i) diag(i) = scale * RealScalar(i + 1);
|
||||
for (Index i = 0; i < n - 1; ++i) subdiag(i) = scale * RealScalar(0.5);
|
||||
|
||||
SelfAdjointEigenSolver<MatrixType> eig1;
|
||||
eig1.computeFromTridiagonal(diag, subdiag, ComputeEigenvectors);
|
||||
VERIFY_IS_EQUAL(eig1.info(), Success);
|
||||
|
||||
// Reconstruct tridiagonal and check residual.
|
||||
Matrix<RealScalar, Dynamic, Dynamic> T = Matrix<RealScalar, Dynamic, Dynamic>::Zero(n, n);
|
||||
T.diagonal() = diag;
|
||||
T.template diagonal<1>() = subdiag;
|
||||
T.template diagonal<-1>() = subdiag;
|
||||
VERIFY_IS_APPROX(
|
||||
T, eig1.eigenvectors().real() * eig1.eigenvalues().asDiagonal() * eig1.eigenvectors().real().transpose());
|
||||
|
||||
// Case 2: Tiny values.
|
||||
scale = (std::numeric_limits<RealScalar>::min)() * RealScalar(100);
|
||||
for (Index i = 0; i < n; ++i) diag(i) = scale * RealScalar(i + 1);
|
||||
for (Index i = 0; i < n - 1; ++i) subdiag(i) = scale * RealScalar(0.5);
|
||||
|
||||
SelfAdjointEigenSolver<MatrixType> eig2;
|
||||
eig2.computeFromTridiagonal(diag, subdiag, ComputeEigenvectors);
|
||||
VERIFY_IS_EQUAL(eig2.info(), Success);
|
||||
|
||||
// Eigenvalues-only mode should produce the same eigenvalues.
|
||||
SelfAdjointEigenSolver<MatrixType> eig2v;
|
||||
eig2v.computeFromTridiagonal(diag, subdiag, EigenvaluesOnly);
|
||||
VERIFY_IS_EQUAL(eig2v.info(), Success);
|
||||
VERIFY_IS_APPROX(eig2.eigenvalues(), eig2v.eigenvalues());
|
||||
}
|
||||
|
||||
// Test with diagonal matrices (tridiagonalization is trivial).
|
||||
template <typename MatrixType>
|
||||
void selfadjointeigensolver_diagonal(const MatrixType& m) {
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
typedef typename NumTraits<Scalar>::Real RealScalar;
|
||||
Index n = m.rows();
|
||||
|
||||
// Random diagonal matrix.
|
||||
MatrixType diag = MatrixType::Zero(n, n);
|
||||
for (Index i = 0; i < n; ++i) {
|
||||
diag(i, i) = internal::random<RealScalar>(-100, 100);
|
||||
}
|
||||
selfadjointeigensolver_essential_check(diag);
|
||||
|
||||
// The eigenvalues should be the diagonal entries, sorted.
|
||||
SelfAdjointEigenSolver<MatrixType> eig(diag);
|
||||
VERIFY_IS_EQUAL(eig.info(), Success);
|
||||
|
||||
Matrix<RealScalar, Dynamic, 1> expected_evals(n);
|
||||
for (Index i = 0; i < n; ++i) expected_evals(i) = numext::real(diag(i, i));
|
||||
std::sort(expected_evals.data(), expected_evals.data() + n);
|
||||
VERIFY_IS_APPROX(eig.eigenvalues(), expected_evals);
|
||||
}
|
||||
|
||||
// Test operatorInverseSqrt more thoroughly.
|
||||
template <typename MatrixType>
|
||||
void selfadjointeigensolver_inverse_sqrt(const MatrixType& m) {
|
||||
Index n = m.rows();
|
||||
if (n < 1) return;
|
||||
|
||||
// Create a positive-definite matrix.
|
||||
MatrixType a = MatrixType::Random(n, n);
|
||||
MatrixType spd = a.adjoint() * a + MatrixType::Identity(n, n);
|
||||
spd.template triangularView<StrictlyUpper>().setZero();
|
||||
|
||||
SelfAdjointEigenSolver<MatrixType> eig(spd);
|
||||
VERIFY_IS_EQUAL(eig.info(), Success);
|
||||
|
||||
MatrixType sqrtA = eig.operatorSqrt();
|
||||
MatrixType invSqrtA = eig.operatorInverseSqrt();
|
||||
|
||||
// sqrtA * invSqrtA should be identity.
|
||||
VERIFY_IS_APPROX(sqrtA * invSqrtA, MatrixType::Identity(n, n));
|
||||
|
||||
// invSqrtA * A * invSqrtA should be identity.
|
||||
VERIFY_IS_APPROX(invSqrtA * spd.template selfadjointView<Lower>() * invSqrtA, MatrixType::Identity(n, n));
|
||||
|
||||
// invSqrtA should be symmetric/selfadjoint.
|
||||
VERIFY_IS_APPROX(invSqrtA, invSqrtA.adjoint());
|
||||
}
|
||||
|
||||
// Test that RowMajor matrices work correctly with computeDirect.
|
||||
template <int>
|
||||
void selfadjointeigensolver_rowmajor() {
|
||||
typedef Matrix<double, 3, 3, RowMajor> RowMajorMatrix3d;
|
||||
typedef Matrix<double, 2, 2, RowMajor> RowMajorMatrix2d;
|
||||
typedef Matrix<float, 3, 3, RowMajor> RowMajorMatrix3f;
|
||||
typedef Matrix<float, 2, 2, RowMajor> RowMajorMatrix2f;
|
||||
|
||||
// 3x3 RowMajor double
|
||||
{
|
||||
RowMajorMatrix3d a = RowMajorMatrix3d::Random();
|
||||
RowMajorMatrix3d symmA = a.transpose() * a;
|
||||
SelfAdjointEigenSolver<RowMajorMatrix3d> eig;
|
||||
eig.computeDirect(symmA);
|
||||
VERIFY_IS_EQUAL(eig.info(), Success);
|
||||
// Compare with iterative solver.
|
||||
SelfAdjointEigenSolver<RowMajorMatrix3d> eigRef(symmA);
|
||||
VERIFY_IS_APPROX(eigRef.eigenvalues(), eig.eigenvalues());
|
||||
}
|
||||
|
||||
// 2x2 RowMajor double
|
||||
{
|
||||
RowMajorMatrix2d a = RowMajorMatrix2d::Random();
|
||||
RowMajorMatrix2d symmA = a.transpose() * a;
|
||||
SelfAdjointEigenSolver<RowMajorMatrix2d> eig;
|
||||
eig.computeDirect(symmA);
|
||||
VERIFY_IS_EQUAL(eig.info(), Success);
|
||||
SelfAdjointEigenSolver<RowMajorMatrix2d> eigRef(symmA);
|
||||
VERIFY_IS_APPROX(eigRef.eigenvalues(), eig.eigenvalues());
|
||||
}
|
||||
|
||||
// 3x3 RowMajor float
|
||||
{
|
||||
RowMajorMatrix3f a = RowMajorMatrix3f::Random();
|
||||
RowMajorMatrix3f symmA = a.transpose() * a;
|
||||
SelfAdjointEigenSolver<RowMajorMatrix3f> eig;
|
||||
eig.computeDirect(symmA);
|
||||
VERIFY_IS_EQUAL(eig.info(), Success);
|
||||
SelfAdjointEigenSolver<RowMajorMatrix3f> eigRef(symmA);
|
||||
VERIFY_IS_APPROX(eigRef.eigenvalues(), eig.eigenvalues());
|
||||
}
|
||||
|
||||
// 2x2 RowMajor float
|
||||
{
|
||||
RowMajorMatrix2f a = RowMajorMatrix2f::Random();
|
||||
RowMajorMatrix2f symmA = a.transpose() * a;
|
||||
SelfAdjointEigenSolver<RowMajorMatrix2f> eig;
|
||||
eig.computeDirect(symmA);
|
||||
VERIFY_IS_EQUAL(eig.info(), Success);
|
||||
SelfAdjointEigenSolver<RowMajorMatrix2f> eigRef(symmA);
|
||||
VERIFY_IS_APPROX(eigRef.eigenvalues(), eig.eigenvalues());
|
||||
}
|
||||
|
||||
// Dynamic RowMajor with iterative solver
|
||||
{
|
||||
typedef Matrix<double, Dynamic, Dynamic, RowMajor> RowMajorMatrixXd;
|
||||
int s = internal::random<int>(2, 20);
|
||||
RowMajorMatrixXd a = RowMajorMatrixXd::Random(s, s);
|
||||
RowMajorMatrixXd symmA = a.transpose() * a;
|
||||
SelfAdjointEigenSolver<RowMajorMatrixXd> eig(symmA);
|
||||
VERIFY_IS_EQUAL(eig.info(), Success);
|
||||
double scaling = symmA.cwiseAbs().maxCoeff();
|
||||
if (scaling > (std::numeric_limits<double>::min)()) {
|
||||
VERIFY_IS_APPROX((symmA.template selfadjointView<Lower>() * eig.eigenvectors()) / scaling,
|
||||
(eig.eigenvectors() * eig.eigenvalues().asDiagonal()) / scaling);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test matrix with Inf entries returns NoConvergence (similar to NaN test).
|
||||
template <int>
|
||||
void selfadjointeigensolver_inf() {
|
||||
Matrix3d m;
|
||||
m.setRandom();
|
||||
m = m * m.transpose();
|
||||
m(1, 1) = std::numeric_limits<double>::infinity();
|
||||
SelfAdjointEigenSolver<Matrix3d> eig(m);
|
||||
VERIFY_IS_EQUAL(eig.info(), NoConvergence);
|
||||
}
|
||||
|
||||
template <int>
|
||||
void bug_854() {
|
||||
Matrix3d m;
|
||||
@@ -234,6 +571,88 @@ void bug_1204() {
|
||||
SelfAdjointEigenSolver<Eigen::SparseMatrix<double> > eig(A);
|
||||
}
|
||||
|
||||
// Specific 3x3 test cases that stress the direct solver.
|
||||
template <int>
|
||||
void direct_3x3_stress() {
|
||||
// Near-planar point cloud covariance: two large eigenvalues, one near-zero.
|
||||
{
|
||||
Matrix3d m;
|
||||
m << 100, 50, 0.001, 50, 100, 0.002, 0.001, 0.002, 1e-10;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
|
||||
// All equal diagonal entries (triple eigenvalue).
|
||||
{
|
||||
Matrix3d m = Matrix3d::Identity() * 7.0;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
|
||||
// Two exactly equal eigenvalues (from explicit construction).
|
||||
{
|
||||
Matrix3d q;
|
||||
q << 1, 0, 0, 0, 1.0 / std::sqrt(2.0), 1.0 / std::sqrt(2.0), 0, -1.0 / std::sqrt(2.0), 1.0 / std::sqrt(2.0);
|
||||
Vector3d d(1.0, 5.0, 5.0);
|
||||
Matrix3d m = q * d.asDiagonal() * q.transpose();
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
|
||||
// Large off-diagonal relative to diagonal.
|
||||
{
|
||||
Matrix3d m;
|
||||
m << 1, 1000, 1000, 1000, 1, 1000, 1000, 1000, 1;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
|
||||
// Nearly singular: one eigenvalue much smaller than others.
|
||||
{
|
||||
Matrix3d m;
|
||||
m << 1, 0.5, 0.3, 0.5, 1, 0.4, 0.3, 0.4, 1;
|
||||
m *= 1e15;
|
||||
Matrix3d perturbation = Matrix3d::Zero();
|
||||
perturbation(0, 0) = 1e-15;
|
||||
m += perturbation;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
}
|
||||
|
||||
// Specific 2x2 test cases that stress the direct solver.
|
||||
template <int>
|
||||
void direct_2x2_stress() {
|
||||
// Equal eigenvalues.
|
||||
{
|
||||
Matrix2d m = Matrix2d::Identity() * 42.0;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
|
||||
// Very small off-diagonal.
|
||||
{
|
||||
Matrix2d m;
|
||||
m << 1.0, 1e-15, 1e-15, 1.0;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
|
||||
// Huge ratio between diagonal entries.
|
||||
{
|
||||
Matrix2d m;
|
||||
m << 1e100, 0, 0, 1e-100;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
|
||||
// Anti-diagonal dominant.
|
||||
{
|
||||
Matrix2d m;
|
||||
m << 0, 1e10, 1e10, 0;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
|
||||
// Negative entries.
|
||||
{
|
||||
Matrix2d m;
|
||||
m << -5.0, 3.0, 3.0, -5.0;
|
||||
selfadjointeigensolver_essential_check(m);
|
||||
}
|
||||
}
|
||||
|
||||
EIGEN_DECLARE_TEST(eigensolver_selfadjoint) {
|
||||
int s = 0;
|
||||
for (int i = 0; i < g_repeat; i++) {
|
||||
@@ -266,6 +685,39 @@ EIGEN_DECLARE_TEST(eigensolver_selfadjoint) {
|
||||
CALL_SUBTEST_5(selfadjointeigensolver(MatrixXcd(2, 2)));
|
||||
CALL_SUBTEST_6(selfadjointeigensolver(Matrix<double, 1, 1>()));
|
||||
CALL_SUBTEST_7(selfadjointeigensolver(Matrix<double, 2, 2>()));
|
||||
|
||||
// repeated eigenvalues
|
||||
CALL_SUBTEST_17(selfadjointeigensolver_repeated_eigenvalues(Matrix3d()));
|
||||
CALL_SUBTEST_15(selfadjointeigensolver_repeated_eigenvalues(Matrix2d()));
|
||||
CALL_SUBTEST_2(selfadjointeigensolver_repeated_eigenvalues(Matrix4d()));
|
||||
CALL_SUBTEST_4(selfadjointeigensolver_repeated_eigenvalues(MatrixXd(s, s)));
|
||||
CALL_SUBTEST_13(selfadjointeigensolver_repeated_eigenvalues(Matrix3f()));
|
||||
CALL_SUBTEST_12(selfadjointeigensolver_repeated_eigenvalues(Matrix2f()));
|
||||
CALL_SUBTEST_18(selfadjointeigensolver_repeated_eigenvalues(Matrix3cd()));
|
||||
|
||||
// extreme eigenvalues (near overflow/underflow, high condition number)
|
||||
CALL_SUBTEST_17(selfadjointeigensolver_extreme_eigenvalues(Matrix3d()));
|
||||
CALL_SUBTEST_2(selfadjointeigensolver_extreme_eigenvalues(Matrix4d()));
|
||||
CALL_SUBTEST_4(selfadjointeigensolver_extreme_eigenvalues(MatrixXd(s, s)));
|
||||
CALL_SUBTEST_13(selfadjointeigensolver_extreme_eigenvalues(Matrix3f()));
|
||||
CALL_SUBTEST_3(selfadjointeigensolver_extreme_eigenvalues(MatrixXf(s, s)));
|
||||
|
||||
// computeFromTridiagonal with scaled inputs
|
||||
CALL_SUBTEST_4(selfadjointeigensolver_tridiagonal_scaled(MatrixXd(s, s)));
|
||||
CALL_SUBTEST_3(selfadjointeigensolver_tridiagonal_scaled(MatrixXf(s, s)));
|
||||
|
||||
// diagonal matrices
|
||||
CALL_SUBTEST_17(selfadjointeigensolver_diagonal(Matrix3d()));
|
||||
CALL_SUBTEST_4(selfadjointeigensolver_diagonal(MatrixXd(s, s)));
|
||||
|
||||
// operatorInverseSqrt
|
||||
CALL_SUBTEST_17(selfadjointeigensolver_inverse_sqrt(Matrix3d()));
|
||||
CALL_SUBTEST_2(selfadjointeigensolver_inverse_sqrt(Matrix4d()));
|
||||
CALL_SUBTEST_4(selfadjointeigensolver_inverse_sqrt(MatrixXd(s, s)));
|
||||
CALL_SUBTEST_13(selfadjointeigensolver_inverse_sqrt(Matrix3f()));
|
||||
|
||||
// RowMajor
|
||||
CALL_SUBTEST_19(selfadjointeigensolver_rowmajor<0>());
|
||||
}
|
||||
|
||||
CALL_SUBTEST_17(bug_854<0>());
|
||||
@@ -273,6 +725,13 @@ EIGEN_DECLARE_TEST(eigensolver_selfadjoint) {
|
||||
CALL_SUBTEST_17(bug_1204<0>());
|
||||
CALL_SUBTEST_17(bug_1225<0>());
|
||||
|
||||
// Stress tests for direct 3x3 and 2x2 solvers.
|
||||
CALL_SUBTEST_17(direct_3x3_stress<0>());
|
||||
CALL_SUBTEST_15(direct_2x2_stress<0>());
|
||||
|
||||
// Test Inf input handling.
|
||||
CALL_SUBTEST_17(selfadjointeigensolver_inf<0>());
|
||||
|
||||
// Test problem size constructors
|
||||
s = internal::random<int>(1, EIGEN_TEST_MAX_SIZE / 4);
|
||||
CALL_SUBTEST_8(SelfAdjointEigenSolver<MatrixXf> tmp1(s));
|
||||
|
||||
Reference in New Issue
Block a user