Fix bugs and improve robustness of SelfAdjointEigenSolver, improve test coverage

libeigen/eigen!2396

Co-authored-by: Rasmus Munk Larsen <rmlarsen@gmail.com>
This commit is contained in:
Rasmus Munk Larsen
2026-04-07 21:08:29 -07:00
parent bde3a68bae
commit 110530a4d8
6 changed files with 1519 additions and 162 deletions

View File

@@ -25,7 +25,7 @@ namespace internal {
template <typename SolverType, int Size, bool IsComplex>
struct direct_selfadjoint_eigenvalues;
template <typename MatrixType, typename DiagType, typename SubDiagType>
template <bool PerBlockScaling, typename MatrixType, typename DiagType, typename SubDiagType>
EIGEN_DEVICE_FUNC ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag,
const Index maxIterations, bool computeEigenvectors,
MatrixType& eivec);
@@ -438,7 +438,7 @@ EIGEN_DEVICE_FUNC SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<Mat
m_eivec = matrix;
m_eivalues.coeffRef(0, 0) = numext::real(m_eivec.coeff(0, 0));
if (computeEigenvectors) m_eivec.setOnes(n, n);
m_info = Success;
m_info = (numext::isfinite)(m_eivalues.coeffRef(0, 0)) ? Success : NoConvergence;
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
return *this;
@@ -448,18 +448,29 @@ EIGEN_DEVICE_FUNC SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<Mat
RealVectorType& diag = m_eivalues;
EigenvectorsType& mat = m_eivec;
// map the matrix coefficients to [-1:1] to avoid over- and underflow.
// Scale the matrix to [-1:1] to avoid overflow/underflow during tridiagonalization
// and subsequent QR iteration. This uniform scaling ensures the tridiagonal output is
// well-conditioned. Note: for block-diagonal matrices with widely separated scales, this
// can underflow small blocks. Users with such matrices should tridiagonalize separately
// and call computeFromTridiagonal(), which uses per-block scaling.
mat = matrix.template triangularView<Lower>();
RealScalar scale = mat.cwiseAbs().maxCoeff();
if (!(numext::isfinite)(scale)) {
// Input contains Inf or NaN.
m_info = NoConvergence;
m_isInitialized = true;
m_eigenvectorsOk = false;
return *this;
}
if (numext::is_exactly_zero(scale)) scale = RealScalar(1);
mat.template triangularView<Lower>() /= scale;
m_subdiag.resize(n - 1);
m_hcoeffs.resize(n - 1);
internal::tridiagonalization_inplace(mat, diag, m_subdiag, m_hcoeffs, m_workspace, computeEigenvectors);
m_info = internal::computeFromTridiagonal_impl(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
m_info = internal::computeFromTridiagonal_impl<false>(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
// scale back the eigen values
// Scale back the eigenvalues.
m_eivalues *= scale;
m_isInitialized = true;
@@ -470,15 +481,31 @@ EIGEN_DEVICE_FUNC SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<Mat
template <typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>::computeFromTridiagonal(
const RealVectorType& diag, const SubDiagonalType& subdiag, int options) {
// TODO : Add an option to scale the values beforehand
bool computeEigenvectors = (options & ComputeEigenvectors) == ComputeEigenvectors;
m_eivalues = diag;
m_subdiag = subdiag;
// Check for Inf/NaN in the input.
{
RealScalar scale = RealScalar(0);
if (m_eivalues.size() > 0) scale = m_eivalues.cwiseAbs().maxCoeff();
if (m_subdiag.size() > 0) scale = numext::maxi(scale, m_subdiag.cwiseAbs().maxCoeff());
if (!(numext::isfinite)(scale)) {
m_info = NoConvergence;
m_isInitialized = true;
m_eigenvectorsOk = false;
return *this;
}
}
if (computeEigenvectors) {
m_eivec.setIdentity(diag.size(), diag.size());
}
m_info = internal::computeFromTridiagonal_impl(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
// Use per-deflation-block scaling (like LAPACK's DSTERF) to avoid losing
// precision when the tridiagonal entries span a wide range of magnitudes.
m_info =
internal::computeFromTridiagonal_impl<true>(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
m_isInitialized = true;
m_eigenvectorsOk = computeEigenvectors;
@@ -490,6 +517,10 @@ namespace internal {
* \internal
* \brief Compute the eigendecomposition from a tridiagonal matrix
*
* \tparam PerBlockScaling If true, each deflation block is independently scaled to [-1,1] before
* QR iteration, following LAPACK's DSTERF approach. This prevents precision loss when entries
* span a wide range of magnitudes. When false, the caller is responsible for ensuring the
* entries are in a safe range (e.g. by pre-scaling the dense matrix before tridiagonalization).
* \param[in,out] diag : On input, the diagonal of the matrix, on output the eigenvalues
* \param[in,out] subdiag : The subdiagonal part of the matrix (entries are modified during the decomposition)
* \param[in] maxIterations : the maximum number of iterations
@@ -497,7 +528,7 @@ namespace internal {
* \param[out] eivec : The matrix to store the eigenvectors if computeEigenvectors==true. Must be allocated on input.
* \returns \c Success or \c NoConvergence
*/
template <typename MatrixType, typename DiagType, typename SubDiagType>
template <bool PerBlockScaling, typename MatrixType, typename DiagType, typename SubDiagType>
EIGEN_DEVICE_FUNC ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag,
const Index maxIterations, bool computeEigenvectors,
MatrixType& eivec) {
@@ -512,21 +543,32 @@ EIGEN_DEVICE_FUNC ComputationInfo computeFromTridiagonal_impl(DiagType& diag, Su
typedef typename DiagType::RealScalar RealScalar;
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
const RealScalar precision_inv = RealScalar(1) / NumTraits<RealScalar>::epsilon();
while (end > 0) {
for (Index i = start; i < end; ++i) {
// Helper lambda for the deflation test.
auto deflate = [&](Index lo, Index hi) {
for (Index i = lo; i < hi; ++i) {
if (numext::abs(subdiag[i]) < considerAsZero) {
subdiag[i] = RealScalar(0);
} else {
// abs(subdiag[i]) <= epsilon * sqrt(abs(diag[i]) + abs(diag[i+1]))
// Scaled to prevent underflows.
const RealScalar scaled_subdiag = precision_inv * subdiag[i];
if (scaled_subdiag * scaled_subdiag <= (numext::abs(diag[i]) + numext::abs(diag[i + 1]))) {
subdiag[i] = RealScalar(0);
}
}
}
};
// find the largest unreduced block at the end of the matrix.
// For per-block scaling, track the currently scaled block and its scale factor.
// When the outer loop identifies a block outside the scaled region, unscale the old
// block and scale the new one. This keeps the same outer loop structure (one QR step
// per iteration) while ensuring each block is processed in scaled coordinates.
Index scaled_start = -1, scaled_end = -1;
RealScalar block_scale = RealScalar(1);
while (end > 0) {
deflate(start, end);
// Find the largest unreduced block at the end of the matrix.
while (end > 0 && numext::is_exactly_zero(subdiag[end - 1])) {
end--;
}
@@ -539,9 +581,42 @@ EIGEN_DEVICE_FUNC ComputationInfo computeFromTridiagonal_impl(DiagType& diag, Su
start = end - 1;
while (start > 0 && !numext::is_exactly_zero(subdiag[start - 1])) start--;
if (PerBlockScaling) {
// Check if we've moved to a different block than the one currently scaled.
if (start != scaled_start || end != scaled_end) {
// Unscale the previous block if it was scaled.
if (block_scale != RealScalar(1)) {
for (Index i = scaled_start; i <= scaled_end; ++i) diag[i] /= block_scale;
for (Index i = scaled_start; i < scaled_end; ++i) {
if (!numext::is_exactly_zero(subdiag[i])) subdiag[i] /= block_scale;
}
block_scale = RealScalar(1);
}
// Compute the norm and scale the new block to [-1:1].
RealScalar block_norm = RealScalar(0);
for (Index i = start; i <= end; ++i) block_norm = numext::maxi(block_norm, numext::abs(diag[i]));
for (Index i = start; i < end; ++i) block_norm = numext::maxi(block_norm, numext::abs(subdiag[i]));
if (block_norm > RealScalar(0) && block_norm != RealScalar(1)) {
block_scale = RealScalar(1) / block_norm;
for (Index i = start; i <= end; ++i) diag[i] *= block_scale;
for (Index i = start; i < end; ++i) subdiag[i] *= block_scale;
}
scaled_start = start;
scaled_end = end;
}
}
internal::tridiagonal_qr_step<MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor>(
diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);
}
// Unscale any remaining scaled block.
if (PerBlockScaling && block_scale != RealScalar(1)) {
for (Index i = scaled_start; i <= scaled_end; ++i) diag[i] /= block_scale;
for (Index i = scaled_start; i < scaled_end; ++i) {
if (!numext::is_exactly_zero(subdiag[i])) subdiag[i] /= block_scale;
}
}
if (iter <= maxIterations * n)
info = Success;
else
@@ -662,6 +737,28 @@ struct direct_selfadjoint_eigenvalues<SolverType, 3, false> {
// compute the eigenvalues
computeRoots(scaledMat, eivals);
// computeRoots produces theoretically sorted roots, but floating-point
// rounding in the trigonometric formulas can break the ordering.
// Enforce sorting with a fixed 3-element compare-and-swap network.
{
Scalar tmp;
if (eivals(0) > eivals(1)) {
tmp = eivals(0);
eivals(0) = eivals(1);
eivals(1) = tmp;
}
if (eivals(1) > eivals(2)) {
tmp = eivals(1);
eivals(1) = eivals(2);
eivals(2) = tmp;
}
if (eivals(0) > eivals(1)) {
tmp = eivals(0);
eivals(0) = eivals(1);
eivals(1) = tmp;
}
}
// compute the eigenvectors
if (computeEigenvectors) {
if ((eivals(2) - eivals(0)) <= Eigen::NumTraits<Scalar>::epsilon()) {
@@ -691,7 +788,7 @@ struct direct_selfadjoint_eigenvalues<SolverType, 3, false> {
if (d0 <= 2 * Eigen::NumTraits<Scalar>::epsilon() * d1) {
// If d0 is too small, then the two other eigenvalues are numerically the same,
// and thus we only have to ortho-normalize the near orthogonal vector we saved above.
eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l)) * eivecs.col(l);
eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l)) * eivecs.col(k);
eivecs.col(l).normalize();
} else {
tmp = scaledMat;

View File

@@ -21,3 +21,4 @@ eigen_add_benchmark(bench_syr2 bench_syr2.cpp)
eigen_add_benchmark(bench_construction bench_construction.cpp)
eigen_add_benchmark(bench_fixed_size bench_fixed_size.cpp)
eigen_add_benchmark(bench_fixed_size_double bench_fixed_size.cpp DEFINITIONS SCALAR=double)
eigen_add_benchmark(bench_small_matrix bench_small_matrix.cpp)

View File

@@ -0,0 +1,316 @@
#include <benchmark/benchmark.h>
#include <Eigen/Core>
#include <Eigen/LU>
#include <Eigen/Cholesky>
#include <Eigen/QR>
#include <Eigen/SVD>
#include <Eigen/Eigenvalues>
using namespace Eigen;
// ============================================================================
// Fixed-size matrix multiply (the fundamental operation)
// ============================================================================
template <typename Scalar, int N>
static void BM_MatMul(benchmark::State& state) {
  // Fixed-size operands; setRandom keeps the benchmark input nontrivial.
  Matrix<Scalar, N, N> lhs, rhs, prod;
  lhs.setRandom();
  rhs.setRandom();
  for (auto _ : state) {
    // noalias() skips the temporary Eigen would otherwise create for safety.
    prod.noalias() = lhs * rhs;
    benchmark::DoNotOptimize(prod.data());
  }
}
// Matrix-vector multiply
template <typename Scalar, int N>
static void BM_MatVec(benchmark::State& state) {
  Matrix<Scalar, N, N> mat;
  Matrix<Scalar, N, 1> vec, res;
  mat.setRandom();
  vec.setRandom();
  for (auto _ : state) {
    // noalias() avoids the temporary for the product assignment.
    res.noalias() = mat * vec;
    benchmark::DoNotOptimize(res.data());
  }
}
// ============================================================================
// Fixed-size inverse (critical for transform operations)
// ============================================================================
// Kept out-of-line so the compiler cannot constant-fold the inverse across
// benchmark iterations.
template <typename Scalar, int N>
EIGEN_DONT_INLINE void do_inverse(const Matrix<Scalar, N, N>& src, Matrix<Scalar, N, N>& dst) {
  dst = src.inverse();
}
template <typename Scalar, int N>
static void BM_Inverse(benchmark::State& state) {
  Matrix<Scalar, N, N> input, result;
  input.setRandom();
  // Shift the spectrum away from zero so the matrix is comfortably invertible.
  input += Matrix<Scalar, N, N>::Identity() * Scalar(N);
  for (auto _ : state) {
    do_inverse(input, result);
    benchmark::DoNotOptimize(result.data());
  }
}
// ============================================================================
// Fixed-size determinant
// ============================================================================
template <typename Scalar, int N>
static void BM_Determinant(benchmark::State& state) {
  Matrix<Scalar, N, N> mat;
  mat.setRandom();
  Scalar det;
  for (auto _ : state) {
    det = mat.determinant();
    benchmark::DoNotOptimize(det);
  }
}
// ============================================================================
// LLT (Cholesky) — for SPD matrices (covariance, mass matrices)
// ============================================================================
template <typename Scalar, int N>
static void BM_LLT_Compute(benchmark::State& state) {
  Matrix<Scalar, N, N> spd;
  spd.setRandom();
  // A^T * A is positive semi-definite; adding the identity makes it definite.
  spd = spd.transpose() * spd + Matrix<Scalar, N, N>::Identity();
  LLT<Matrix<Scalar, N, N>> llt;
  for (auto _ : state) {
    llt.compute(spd);
    benchmark::DoNotOptimize(&llt);
  }
}
template <typename Scalar, int N>
static void BM_LLT_Solve(benchmark::State& state) {
  Matrix<Scalar, N, N> spd;
  spd.setRandom();
  // Build an SPD system; the factorization happens once, outside the loop.
  spd = spd.transpose() * spd + Matrix<Scalar, N, N>::Identity();
  Matrix<Scalar, N, 1> rhs = Matrix<Scalar, N, 1>::Random();
  LLT<Matrix<Scalar, N, N>> llt(spd);
  Matrix<Scalar, N, 1> sol;
  for (auto _ : state) {
    sol = llt.solve(rhs);
    benchmark::DoNotOptimize(sol.data());
  }
}
// ============================================================================
// LDLT — for semi-definite matrices
// ============================================================================
template <typename Scalar, int N>
static void BM_LDLT_Compute(benchmark::State& state) {
  Matrix<Scalar, N, N> spd;
  spd.setRandom();
  // A^T * A + I: symmetric positive definite input for the factorization.
  spd = spd.transpose() * spd + Matrix<Scalar, N, N>::Identity();
  LDLT<Matrix<Scalar, N, N>> ldlt;
  for (auto _ : state) {
    ldlt.compute(spd);
    benchmark::DoNotOptimize(&ldlt);
  }
}
// ============================================================================
// PartialPivLU — for general square systems
// ============================================================================
template <typename Scalar, int N>
static void BM_PartialPivLU_Compute(benchmark::State& state) {
  Matrix<Scalar, N, N> mat;
  mat.setRandom();
  // Diagonal shift keeps the matrix well-conditioned for the factorization.
  mat += Matrix<Scalar, N, N>::Identity() * Scalar(N);
  PartialPivLU<Matrix<Scalar, N, N>> lu;
  for (auto _ : state) {
    lu.compute(mat);
    benchmark::DoNotOptimize(lu.matrixLU().data());
  }
}
template <typename Scalar, int N>
static void BM_PartialPivLU_Solve(benchmark::State& state) {
  Matrix<Scalar, N, N> mat;
  mat.setRandom();
  mat += Matrix<Scalar, N, N>::Identity() * Scalar(N);
  Matrix<Scalar, N, 1> rhs = Matrix<Scalar, N, 1>::Random();
  // Factor once up front; the loop times only the triangular solves.
  PartialPivLU<Matrix<Scalar, N, N>> lu(mat);
  Matrix<Scalar, N, 1> sol;
  for (auto _ : state) {
    sol = lu.solve(rhs);
    benchmark::DoNotOptimize(sol.data());
  }
}
// ============================================================================
// ColPivHouseholderQR — for least-squares (camera calibration, etc.)
// ============================================================================
template <typename Scalar, int Rows, int Cols>
static void BM_ColPivQR_Compute(benchmark::State& state) {
  Matrix<Scalar, Rows, Cols> mat;
  mat.setRandom();
  ColPivHouseholderQR<Matrix<Scalar, Rows, Cols>> qr;
  for (auto _ : state) {
    qr.compute(mat);
    benchmark::DoNotOptimize(qr.matrixR().data());
  }
}
// ============================================================================
// JacobiSVD — the workhorse for small matrices in CV
// ============================================================================
// Options defaults to thin U/V; pass 0 to benchmark a values-only decomposition.
template <typename Scalar, int Rows, int Cols, int Options = ComputeThinU | ComputeThinV>
static void BM_JacobiSVD_Compute(benchmark::State& state) {
  Matrix<Scalar, Rows, Cols> mat;
  mat.setRandom();
  JacobiSVD<Matrix<Scalar, Rows, Cols>, Options> svd;
  for (auto _ : state) {
    svd.compute(mat);
    benchmark::DoNotOptimize(svd.singularValues().data());
  }
}
template <typename Scalar, int Rows, int Cols>
static void BM_JacobiSVD_Solve(benchmark::State& state) {
  Matrix<Scalar, Rows, Cols> mat;
  mat.setRandom();
  Matrix<Scalar, Rows, 1> rhs = Matrix<Scalar, Rows, 1>::Random();
  // Decompose once; time only the least-squares solve.
  JacobiSVD<Matrix<Scalar, Rows, Cols>, ComputeThinU | ComputeThinV> svd(mat);
  Matrix<Scalar, Cols, 1> sol;
  for (auto _ : state) {
    sol = svd.solve(rhs);
    benchmark::DoNotOptimize(sol.data());
  }
}
// ============================================================================
// SelfAdjointEigenSolver — PCA, normal estimation
// ============================================================================
template <typename Scalar, int N>
static void BM_SelfAdjointEig_Compute(benchmark::State& state) {
  Matrix<Scalar, N, N> sym;
  sym.setRandom();
  // A^T * A is symmetric, the input class this solver requires.
  sym = sym.transpose() * sym;
  SelfAdjointEigenSolver<Matrix<Scalar, N, N>> eig;
  for (auto _ : state) {
    eig.compute(sym);
    benchmark::DoNotOptimize(eig.eigenvalues().data());
  }
}
// SelfAdjointEigenSolver::computeDirect — closed-form for 2x2 and 3x3
template <typename Scalar, int N>
static void BM_SelfAdjointEig_ComputeDirect(benchmark::State& state) {
  Matrix<Scalar, N, N> sym;
  sym.setRandom();
  sym = sym.transpose() * sym;
  SelfAdjointEigenSolver<Matrix<Scalar, N, N>> eig;
  for (auto _ : state) {
    // computeDirect uses the analytic eigendecomposition (valid for N <= 3).
    eig.computeDirect(sym);
    benchmark::DoNotOptimize(eig.eigenvalues().data());
  }
}
// ============================================================================
// Registration — focus on robotics/CV sizes
// ============================================================================
// Each BENCHMARK() line instantiates one template specialization; the size
// comments below record why each (Scalar, dimension) pair was chosen.
// Matrix multiply: 2x2, 3x3, 4x4, 6x6
BENCHMARK(BM_MatMul<float, 2>);
BENCHMARK(BM_MatMul<float, 3>);
BENCHMARK(BM_MatMul<float, 4>);
BENCHMARK(BM_MatMul<float, 6>);
BENCHMARK(BM_MatMul<double, 2>);
BENCHMARK(BM_MatMul<double, 3>);
BENCHMARK(BM_MatMul<double, 4>);
BENCHMARK(BM_MatMul<double, 6>);
// Matrix-vector multiply
BENCHMARK(BM_MatVec<float, 3>);
BENCHMARK(BM_MatVec<float, 4>);
BENCHMARK(BM_MatVec<float, 6>);
BENCHMARK(BM_MatVec<double, 3>);
BENCHMARK(BM_MatVec<double, 4>);
BENCHMARK(BM_MatVec<double, 6>);
// Inverse
BENCHMARK(BM_Inverse<float, 2>);
BENCHMARK(BM_Inverse<float, 3>);
BENCHMARK(BM_Inverse<float, 4>);
BENCHMARK(BM_Inverse<double, 2>);
BENCHMARK(BM_Inverse<double, 3>);
BENCHMARK(BM_Inverse<double, 4>);
// Determinant
BENCHMARK(BM_Determinant<float, 2>);
BENCHMARK(BM_Determinant<float, 3>);
BENCHMARK(BM_Determinant<float, 4>);
BENCHMARK(BM_Determinant<double, 2>);
BENCHMARK(BM_Determinant<double, 3>);
BENCHMARK(BM_Determinant<double, 4>);
// LLT (Cholesky)
BENCHMARK(BM_LLT_Compute<float, 3>);
BENCHMARK(BM_LLT_Compute<float, 4>);
BENCHMARK(BM_LLT_Compute<float, 6>);
BENCHMARK(BM_LLT_Compute<double, 3>);
BENCHMARK(BM_LLT_Compute<double, 4>);
BENCHMARK(BM_LLT_Compute<double, 6>);
BENCHMARK(BM_LLT_Solve<double, 3>);
BENCHMARK(BM_LLT_Solve<double, 6>);
// LDLT
BENCHMARK(BM_LDLT_Compute<double, 3>);
BENCHMARK(BM_LDLT_Compute<double, 6>);
// PartialPivLU
BENCHMARK(BM_PartialPivLU_Compute<float, 3>);
BENCHMARK(BM_PartialPivLU_Compute<float, 4>);
BENCHMARK(BM_PartialPivLU_Compute<double, 3>);
BENCHMARK(BM_PartialPivLU_Compute<double, 4>);
BENCHMARK(BM_PartialPivLU_Solve<double, 3>);
BENCHMARK(BM_PartialPivLU_Solve<double, 4>);
// ColPivHouseholderQR
BENCHMARK(BM_ColPivQR_Compute<float, 3, 3>);
BENCHMARK(BM_ColPivQR_Compute<double, 3, 3>);
BENCHMARK(BM_ColPivQR_Compute<double, 6, 6>);
BENCHMARK(BM_ColPivQR_Compute<double, 8, 3>);  // overdetermined least-squares
// JacobiSVD — the key CV sizes
BENCHMARK(BM_JacobiSVD_Compute<float, 2, 2>);
BENCHMARK(BM_JacobiSVD_Compute<float, 3, 3>);
BENCHMARK(BM_JacobiSVD_Compute<float, 4, 4>);
BENCHMARK(BM_JacobiSVD_Compute<double, 2, 2>);
BENCHMARK(BM_JacobiSVD_Compute<double, 3, 3>);
BENCHMARK(BM_JacobiSVD_Compute<double, 4, 4>);
BENCHMARK(BM_JacobiSVD_Compute<double, 3, 4>);  // projection matrix
BENCHMARK(BM_JacobiSVD_Compute<double, 6, 6>);  // manipulator Jacobian
BENCHMARK(BM_JacobiSVD_Compute<double, 8, 9>);  // fundamental matrix (8-point)
BENCHMARK(BM_JacobiSVD_Solve<double, 3, 3>);
BENCHMARK(BM_JacobiSVD_Solve<double, 6, 6>);
// Values-only SVD (when you just need singular values)
// Options = 0 — extra parens keep the comma out of the macro argument list.
BENCHMARK((BM_JacobiSVD_Compute<double, 3, 3, 0>));
BENCHMARK((BM_JacobiSVD_Compute<double, 6, 6, 0>));
// SelfAdjointEigenSolver — PCA, normal estimation
BENCHMARK(BM_SelfAdjointEig_Compute<float, 3>);
BENCHMARK(BM_SelfAdjointEig_Compute<float, 4>);
BENCHMARK(BM_SelfAdjointEig_Compute<double, 3>);
BENCHMARK(BM_SelfAdjointEig_Compute<double, 4>);
BENCHMARK(BM_SelfAdjointEig_Compute<double, 6>);
// SelfAdjointEigenSolver::computeDirect (closed-form, 2x2 and 3x3 only)
BENCHMARK(BM_SelfAdjointEig_ComputeDirect<float, 2>);
BENCHMARK(BM_SelfAdjointEig_ComputeDirect<float, 3>);
BENCHMARK(BM_SelfAdjointEig_ComputeDirect<double, 2>);
BENCHMARK(BM_SelfAdjointEig_ComputeDirect<double, 3>);

View File

@@ -15,6 +15,7 @@
#define EIGEN_RUNTIME_NO_MALLOC
#include "main.h"
#include "tridiag_test_matrices.h"
#include <Eigen/SVD>
#define SVD_DEFAULT(M) BDCSVD<M>
@@ -146,148 +147,26 @@ void verify_bidiagonal_vs_matrix_svd(const Matrix<RealScalar, Dynamic, 1>& diag,
template <typename RealScalar>
void bdcsvd_bidiagonal_hard_cases() {
using std::abs;
using std::cos;
using std::pow;
using std::sin;
typedef Matrix<RealScalar, Dynamic, 1> VectorXr;
Eigen::internal::set_is_malloc_allowed(true);
const RealScalar eps = NumTraits<RealScalar>::epsilon();
// Use the shared tridiagonal test matrix generators.
// Each generator fills (diag, offdiag) which we treat as (diagonal, superdiagonal)
// of a bidiagonal matrix.
test::for_all_tridiag_test_matrices<RealScalar>(
[](const auto& diag, const auto& offdiag) { verify_bidiagonal_svd<RealScalar>(diag, offdiag); });
// Test sizes: cover n=1, very small, below/above algoSwap (16), and larger.
const int sizes[] = {1, 2, 3, 5, 10, 16, 20, 50, 100};
const int numSizes = sizeof(sizes) / sizeof(sizes[0]);
// Additional SVD-specific test: identity with cross-validation against full matrix SVD.
test::for_tridiag_sizes<RealScalar>([](auto& diag, auto& offdiag) {
test::tridiag_identity(diag, offdiag);
verify_bidiagonal_vs_matrix_svd<RealScalar>(diag, offdiag);
});
for (int si = 0; si < numSizes; ++si) {
const Index n = sizes[si];
VectorXr diag(n), superdiag(n > 1 ? n - 1 : 0);
// 1. Identity: d=[1,...,1], e=[0,...,0]
diag.setOnes();
superdiag.setZero();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
verify_bidiagonal_vs_matrix_svd<RealScalar>(diag, superdiag);
// 2. Zero: d=[0,...,0], e=[0,...,0]
diag.setZero();
superdiag.setZero();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 3. Scalar (only meaningful for n=1, but runs for all)
if (n == 1) {
diag(0) = RealScalar(3.14);
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
}
// 4. Golub-Kahan: d=[1,...,1], e=[1,...,1]
diag.setOnes();
if (n > 1) superdiag.setOnes();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 5. Kahan matrix: d_i = s^(i-1), e_i = -c*s^(i-1)
// Clamp exponents so condition number stays bounded by 1/eps.
{
const RealScalar theta = RealScalar(0.3);
const RealScalar s = sin(theta);
const RealScalar c = cos(theta);
using std::log;
const RealScalar maxPower = -log(eps) / (-log(s));
for (Index i = 0; i < n; ++i) diag(i) = pow(s, numext::mini(RealScalar(i), maxPower));
for (Index i = 0; i < n - 1; ++i) superdiag(i) = -c * pow(s, numext::mini(RealScalar(i), maxPower));
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
}
// 6. Geometric decay diagonal: d_i = 0.5^i, e=[0,...,0]
// Clamp so condition number stays bounded by 1/eps.
{
using std::log;
const RealScalar base = RealScalar(0.5);
const RealScalar maxPower = -log(eps) / (-log(base));
for (Index i = 0; i < n; ++i) diag(i) = pow(base, numext::mini(RealScalar(i), maxPower));
superdiag.setZero();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
}
// 7. Geometric decay superdiagonal: d=[1,...,1], e_i = 0.5^i
diag.setOnes();
for (Index i = 0; i < n - 1; ++i) superdiag(i) = pow(RealScalar(0.5), RealScalar(i));
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 8. Clustered at 1: d_i = 1 + i*eps, e=[0,...,0]
for (Index i = 0; i < n; ++i) diag(i) = RealScalar(1) + RealScalar(i) * eps;
superdiag.setZero();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 9. Two clusters: half ≈ 1, half ≈ eps
for (Index i = 0; i < n; ++i) diag(i) = (i < n / 2) ? RealScalar(1) : eps;
superdiag.setZero();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 10. Single tiny singular value: d=[1,...,1,eps], e=[eps^2,...]
diag.setOnes();
diag(n - 1) = eps;
for (Index i = 0; i < n - 1; ++i) superdiag(i) = eps * eps;
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 11. Graded: d_i = 10^(-i), e_i = 10^(-i)
for (Index i = 0; i < n; ++i) diag(i) = pow(RealScalar(10), -RealScalar(i));
for (Index i = 0; i < n - 1; ++i) superdiag(i) = pow(RealScalar(10), -RealScalar(i));
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 12. Nearly diagonal: random diag, eps * random superdiag
diag = VectorXr::Random(n).cwiseAbs() + VectorXr::Constant(n, RealScalar(0.1));
for (Index i = 0; i < n - 1; ++i) superdiag(i) = eps * (RealScalar(0.5) + abs(internal::random<RealScalar>()));
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 13. All equal: d=[c,...,c], e=[c,...,c]
diag.setConstant(RealScalar(2.5));
if (n > 1) superdiag.setConstant(RealScalar(2.5));
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 14. Wilkinson: d_i = |n/2 - i|, e=[1,...,1]
for (Index i = 0; i < n; ++i) diag(i) = abs(RealScalar(n / 2) - RealScalar(i));
if (n > 1) superdiag.setOnes();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 15. Overflow/underflow: alternating big/tiny diagonal, tiny/big superdiagonal
{
const RealScalar big = (std::numeric_limits<RealScalar>::max)() / RealScalar(1000);
const RealScalar tiny = (std::numeric_limits<RealScalar>::min)() * RealScalar(1000);
for (Index i = 0; i < n; ++i) diag(i) = (i % 2 == 0) ? big : tiny;
for (Index i = 0; i < n - 1; ++i) superdiag(i) = (i % 2 == 0) ? tiny : big;
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
}
// 16. Prescribed condition number: d_i = kappa^(-i/(n-1)), e_i = eps * random
if (n > 1) {
const RealScalar kappa = RealScalar(1) / eps;
for (Index i = 0; i < n; ++i) diag(i) = pow(kappa, -RealScalar(i) / RealScalar(n - 1));
for (Index i = 0; i < n - 1; ++i) superdiag(i) = eps * abs(internal::random<RealScalar>());
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
}
// 17. Rank-deficient: d=[1,..,0,..,0,..,1], e=[0,...,0]
for (Index i = 0; i < n; ++i) diag(i) = (i < n / 3 || i >= 2 * n / 3) ? RealScalar(1) : RealScalar(0);
superdiag.setZero();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 18. Arrowhead stress: d_i = linspace(1, n), e_i = 1/(i+1)
for (Index i = 0; i < n; ++i) diag(i) = RealScalar(1) + RealScalar(i);
for (Index i = 0; i < n - 1; ++i) superdiag(i) = RealScalar(1) / RealScalar(i + 1);
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 19. Repeated singular values: d=[1,2,3,1,2,3,...], e=[0,...,0]
for (Index i = 0; i < n; ++i) diag(i) = RealScalar((i % 3) + 1);
superdiag.setZero();
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// 20. Glued identity: d=[1,...,1], e=0 except e[n/2-1]=eps
diag.setOnes();
superdiag.setZero();
if (n > 2) superdiag(n / 2 - 1) = eps;
verify_bidiagonal_svd<RealScalar>(diag, superdiag);
// Additional SVD-specific test: scalar for n=1.
{
typedef Matrix<RealScalar, Dynamic, 1> VectorXr;
VectorXr diag(1), offdiag(0);
diag(0) = RealScalar(3.14);
verify_bidiagonal_svd<RealScalar>(diag, offdiag);
}
}

View File

@@ -10,6 +10,7 @@
#include "main.h"
#include "svd_fill.h"
#include "tridiag_test_matrices.h"
#include <limits>
#include <Eigen/Eigenvalues>
#include <Eigen/SparseCore>
@@ -25,17 +26,39 @@ void selfadjointeigensolver_essential_check(const MatrixType& m) {
SelfAdjointEigenSolver<MatrixType> eiSymm(m);
VERIFY_IS_EQUAL(eiSymm.info(), Success);
Index n = m.cols();
RealScalar scaling = m.cwiseAbs().maxCoeff();
RealScalar unitary_error_factor = RealScalar(32);
if (scaling < (std::numeric_limits<RealScalar>::min)()) {
VERIFY(eiSymm.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits<RealScalar>::min)());
} else {
VERIFY_IS_APPROX((m.template selfadjointView<Lower>() * eiSymm.eigenvectors()) / scaling,
(eiSymm.eigenvectors() * eiSymm.eigenvalues().asDiagonal()) / scaling);
// Columnwise residual check: for each eigenpair (lambda_i, v_i),
// ||A*v_i - lambda_i*v_i|| / ||A||_max <= c * n * eps
// This ensures accuracy for every eigenpair, including those corresponding
// to small eigenvalues (which a Frobenius norm check would miss).
// Computed in scaled space (dividing by ||A||_max) to avoid overflow.
MatrixType scaledA = m.template selfadjointView<Lower>();
scaledA /= scaling;
MatrixType residual =
scaledA * eiSymm.eigenvectors() - eiSymm.eigenvectors() * (eiSymm.eigenvalues() / scaling).asDiagonal();
RealScalar tol = RealScalar(4) * RealScalar(numext::maxi(Index(1), n)) * NumTraits<RealScalar>::epsilon();
for (Index i = 0; i < n; ++i) {
VERIFY(residual.col(i).norm() <= tol);
}
}
VERIFY_IS_APPROX(m.template selfadjointView<Lower>().eigenvalues(), eiSymm.eigenvalues());
VERIFY(eiSymm.eigenvectors().isUnitary(test_precision<RealScalar>() * unitary_error_factor));
// Eigenvectors must be unitary. Use a tolerance proportional to n*epsilon,
// which is the expected rounding error for Householder-based orthogonal transformations.
RealScalar unitary_tol = RealScalar(4) * RealScalar(numext::maxi(Index(1), n)) * NumTraits<RealScalar>::epsilon();
// But don't go below the test_precision floor (matters for float).
unitary_tol = numext::maxi(unitary_tol, test_precision<RealScalar>());
VERIFY(eiSymm.eigenvectors().isUnitary(unitary_tol));
// Verify eigenvalues are sorted in non-decreasing order.
for (Index i = 1; i < n; ++i) {
VERIFY(eiSymm.eigenvalues()(i) >= eiSymm.eigenvalues()(i - 1));
}
if (m.cols() <= 4) {
SelfAdjointEigenSolver<MatrixType> eiDirect;
@@ -53,12 +76,20 @@ void selfadjointeigensolver_essential_check(const MatrixType& m) {
VERIFY(eiDirect.eigenvalues().cwiseAbs().maxCoeff() <= (std::numeric_limits<RealScalar>::min)());
} else {
VERIFY_IS_APPROX(eiSymm.eigenvalues() / scaling, eiDirect.eigenvalues() / scaling);
// TODO: the direct 3x3 solver can produce large backward errors (>>n*eps*||A||)
// on some matrices. Investigate and fix, then tighten this to a Frobenius norm check.
VERIFY_IS_APPROX((m.template selfadjointView<Lower>() * eiDirect.eigenvectors()) / scaling,
(eiDirect.eigenvectors() * eiDirect.eigenvalues().asDiagonal()) / scaling);
VERIFY_IS_APPROX(m.template selfadjointView<Lower>().eigenvalues() / scaling, eiDirect.eigenvalues() / scaling);
}
VERIFY(eiDirect.eigenvectors().isUnitary(test_precision<RealScalar>() * unitary_error_factor));
// Direct solver eigenvectors must also be unitary.
VERIFY(eiDirect.eigenvectors().isUnitary(unitary_tol));
// Direct solver eigenvalues must also be sorted.
for (Index i = 1; i < n; ++i) {
VERIFY(eiDirect.eigenvalues()(i) >= eiDirect.eigenvalues()(i - 1));
}
}
}
@@ -149,9 +180,15 @@ void selfadjointeigensolver(const MatrixType& m) {
VERIFY_IS_APPROX(tridiag.diagonal(), tridiag.matrixT().diagonal());
VERIFY_IS_APPROX(tridiag.subDiagonal(), tridiag.matrixT().template diagonal<-1>());
Matrix<RealScalar, Dynamic, Dynamic> T = tridiag.matrixT();
if (rows > 1 && cols > 1) {
// FIXME check that upper and lower part are 0:
// VERIFY(T.topRightCorner(rows-2, cols-2).template triangularView<Upper>().isZero());
if (rows > 2) {
// Verify that the tridiagonal matrix is actually tridiagonal (zero outside the three central diagonals).
for (Index i = 0; i < rows; ++i) {
for (Index j = 0; j < cols; ++j) {
if (numext::abs(i - j) > 1) {
VERIFY(numext::is_exactly_zero(T(i, j)));
}
}
}
}
VERIFY_IS_APPROX(tridiag.diagonal(), T.diagonal());
VERIFY_IS_APPROX(tridiag.subDiagonal(), T.template diagonal<1>());
@@ -170,7 +207,7 @@ void selfadjointeigensolver(const MatrixType& m) {
eiSymmTridiag.eigenvectors().real().transpose());
}
// Test matrix expponential from eigendecomposition.
// Test matrix exponential from eigendecomposition.
// First scale to avoid overflow.
symmB = symmB / symmB.norm();
eiSymm.compute(symmB);
@@ -202,6 +239,451 @@ void selfadjointeigensolver(const MatrixType& m) {
}
}
// Test matrices with exact eigenvalue multiplicities.
// Builds A = Q * D * Q^H for a random (numerically) unitary Q and several
// diagonal spectra D with repeated or clustered entries, then runs the shared
// essential checks (residual, orthogonality, sorted eigenvalues) on each A.
// The input matrix m is used only to pick the problem size and scalar type.
template <typename MatrixType>
void selfadjointeigensolver_repeated_eigenvalues(const MatrixType& m) {
  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  Index n = m.rows();
  if (n < 2) return;
  // Create a random unitary matrix via QR.
  MatrixType q = MatrixType::Random(n, n);
  HouseholderQR<MatrixType> qr(q);
  q = qr.householderQ();
  // All eigenvalues equal (scalar multiple of identity).
  {
    RealScalar lambda = internal::random<RealScalar>(-10, 10);
    MatrixType A = lambda * MatrixType::Identity(n, n);
    selfadjointeigensolver_essential_check(A);
  }
  // Eigenvalue of multiplicity n-1 (one distinct, rest equal).
  {
    Matrix<RealScalar, Dynamic, 1> d = Matrix<RealScalar, Dynamic, 1>::Constant(n, RealScalar(3));
    d(0) = RealScalar(-2);
    // eval() forces the product into A before the in-place triangular edit below.
    MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
    // Zero the strictly-upper part: the solver is expected to read only the
    // lower triangle, so this also exercises that contract.
    A.template triangularView<StrictlyUpper>().setZero();
    selfadjointeigensolver_essential_check(A);
  }
  // Two clusters: first half one value, second half another.
  if (n >= 4) {
    Matrix<RealScalar, Dynamic, 1> d(n);
    for (Index i = 0; i < n / 2; ++i) d(i) = RealScalar(1);
    for (Index i = n / 2; i < n; ++i) d(i) = RealScalar(5);
    MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
    A.template triangularView<StrictlyUpper>().setZero();
    selfadjointeigensolver_essential_check(A);
  }
  // Nearly repeated eigenvalues: separated by O(epsilon).
  {
    Matrix<RealScalar, Dynamic, 1> d(n);
    for (Index i = 0; i < n; ++i) {
      // Spread the spectrum over a ~10*n*eps window around 1.
      d(i) = RealScalar(1) + RealScalar(i) * NumTraits<RealScalar>::epsilon() * RealScalar(10);
    }
    MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
    A.template triangularView<StrictlyUpper>().setZero();
    selfadjointeigensolver_essential_check(A);
  }
}
// Test matrices with extreme condition numbers and eigenvalue ranges.
// Builds A = Q * diag(d) * Q^H from a random unitary Q and prescribed spectra d
// that are ill-conditioned, near underflow, near overflow, sign-mixed, or
// rank-deficient, then verifies convergence, small relative residuals, and
// sorted eigenvalues. Only the lower triangle of A is populated, matching the
// solver's default Lower storage convention.
template <typename MatrixType>
void selfadjointeigensolver_extreme_eigenvalues(const MatrixType& m) {
  using std::pow;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  Index n = m.rows();
  if (n < 2) return;
  // Create a random unitary matrix.
  MatrixType q = MatrixType::Random(n, n);
  HouseholderQR<MatrixType> qr(q);
  q = qr.householderQ();
  // Eigenvalues spanning many orders of magnitude (high condition number).
  {
    // A quarter of the representable decimal-exponent range keeps the
    // products formed below finite.
    RealScalar maxExp = RealScalar(std::numeric_limits<RealScalar>::max_exponent10) / RealScalar(4);
    Matrix<RealScalar, Dynamic, 1> d(n);
    for (Index i = 0; i < n; ++i) {
      // Exponents spread uniformly over [-maxExp, maxExp].
      RealScalar exponent = -maxExp + RealScalar(2) * maxExp * RealScalar(i) / RealScalar(n - 1);
      d(i) = pow(RealScalar(10), exponent);
    }
    MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
    A.template triangularView<StrictlyUpper>().setZero();
    SelfAdjointEigenSolver<MatrixType> eig(A);
    VERIFY_IS_EQUAL(eig.info(), Success);
    // For ill-conditioned matrices we can only check the relative residual.
    // ||A*V - V*D|| / ||A|| should be O(n * epsilon).
    RealScalar Anorm = A.template selfadjointView<Lower>().operatorNorm();
    if (Anorm > (std::numeric_limits<RealScalar>::min)()) {
      MatrixType residual = A.template selfadjointView<Lower>() * eig.eigenvectors() -
                            eig.eigenvectors() * eig.eigenvalues().asDiagonal();
      RealScalar rel_err = residual.norm() / Anorm;
      RealScalar tol = RealScalar(4) * RealScalar(n) * NumTraits<RealScalar>::epsilon();
      VERIFY(rel_err <= tol);
    }
    // Eigenvalues must still be sorted.
    for (Index i = 1; i < n; ++i) {
      VERIFY(eig.eigenvalues()(i) >= eig.eigenvalues()(i - 1));
    }
  }
  // Very tiny eigenvalues (near underflow).
  {
    RealScalar tiny = (std::numeric_limits<RealScalar>::min)() * RealScalar(100);
    Matrix<RealScalar, Dynamic, 1> d(n);
    for (Index i = 0; i < n; ++i) {
      d(i) = tiny * (RealScalar(1) + RealScalar(i));
    }
    MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
    A.template triangularView<StrictlyUpper>().setZero();
    selfadjointeigensolver_essential_check(A);
  }
  // Very large eigenvalues (near overflow).
  {
    // Divide by n*100 so forming the full matrix cannot overflow.
    RealScalar huge = (std::numeric_limits<RealScalar>::max)() / (RealScalar(n) * RealScalar(100));
    Matrix<RealScalar, Dynamic, 1> d(n);
    for (Index i = 0; i < n; ++i) {
      d(i) = huge * (RealScalar(1) + RealScalar(i) * RealScalar(0.01));
    }
    MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
    A.template triangularView<StrictlyUpper>().setZero();
    selfadjointeigensolver_essential_check(A);
  }
  // Mix of positive and negative eigenvalues.
  {
    Matrix<RealScalar, Dynamic, 1> d(n);
    for (Index i = 0; i < n; ++i) {
      d(i) = (i % 2 == 0) ? RealScalar(i + 1) : RealScalar(-(i + 1));
    }
    MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
    A.template triangularView<StrictlyUpper>().setZero();
    selfadjointeigensolver_essential_check(A);
  }
  // One zero eigenvalue among non-zero ones (rank-deficient).
  {
    Matrix<RealScalar, Dynamic, 1> d = Matrix<RealScalar, Dynamic, 1>::LinSpaced(n, RealScalar(0), RealScalar(n - 1));
    MatrixType A = (q * d.template cast<Scalar>().asDiagonal() * q.adjoint()).eval();
    A.template triangularView<StrictlyUpper>().setZero();
    selfadjointeigensolver_essential_check(A);
  }
}
// Test computeFromTridiagonal with scaled inputs (regression for missing scaling).
template <typename MatrixType>
void selfadjointeigensolver_tridiagonal_scaled(const MatrixType& m) {
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
Index n = m.rows();
if (n < 2) return;
// Create a tridiagonal matrix with large entries.
typedef Matrix<RealScalar, Dynamic, 1> RealVectorType;
RealVectorType diag(n), subdiag(n - 1);
// Case 1: Large values.
RealScalar scale = (std::numeric_limits<RealScalar>::max)() / (RealScalar(n) * RealScalar(100));
for (Index i = 0; i < n; ++i) diag(i) = scale * RealScalar(i + 1);
for (Index i = 0; i < n - 1; ++i) subdiag(i) = scale * RealScalar(0.5);
SelfAdjointEigenSolver<MatrixType> eig1;
eig1.computeFromTridiagonal(diag, subdiag, ComputeEigenvectors);
VERIFY_IS_EQUAL(eig1.info(), Success);
// Reconstruct tridiagonal and check residual.
Matrix<RealScalar, Dynamic, Dynamic> T = Matrix<RealScalar, Dynamic, Dynamic>::Zero(n, n);
T.diagonal() = diag;
T.template diagonal<1>() = subdiag;
T.template diagonal<-1>() = subdiag;
VERIFY_IS_APPROX(
T, eig1.eigenvectors().real() * eig1.eigenvalues().asDiagonal() * eig1.eigenvectors().real().transpose());
// Case 2: Tiny values.
scale = (std::numeric_limits<RealScalar>::min)() * RealScalar(100);
for (Index i = 0; i < n; ++i) diag(i) = scale * RealScalar(i + 1);
for (Index i = 0; i < n - 1; ++i) subdiag(i) = scale * RealScalar(0.5);
SelfAdjointEigenSolver<MatrixType> eig2;
eig2.computeFromTridiagonal(diag, subdiag, ComputeEigenvectors);
VERIFY_IS_EQUAL(eig2.info(), Success);
// Eigenvalues-only mode should produce the same eigenvalues.
SelfAdjointEigenSolver<MatrixType> eig2v;
eig2v.computeFromTridiagonal(diag, subdiag, EigenvaluesOnly);
VERIFY_IS_EQUAL(eig2v.info(), Success);
VERIFY_IS_APPROX(eig2.eigenvalues(), eig2v.eigenvalues());
}
// Test computeFromTridiagonal with wide dynamic range across decoupled blocks.
// This exercises the per-block scaling in computeFromTridiagonal_impl: a zero on the
// subdiagonal decouples the matrix into blocks with vastly different scales. Global
// scaling would underflow the small block; per-block scaling handles both correctly.
template <typename RealScalar>
void selfadjointeigensolver_tridiagonal_wide_range() {
  using std::sqrt;
  typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixType;
  typedef Matrix<RealScalar, Dynamic, 1> VectorType;
  // Block 1: entries near overflow threshold.
  // Block 2: entries near 1.
  // Separated by a zero subdiagonal entry.
  // sqrt(highest)/10 keeps the product of two "big" entries finite.
  const RealScalar big = sqrt(NumTraits<RealScalar>::highest()) / RealScalar(10);
  const Index n = 6;
  VectorType diag(n), subdiag(n - 1);
  // First block: [0..2], large scale.
  diag(0) = big;
  diag(1) = big * RealScalar(1.1);
  diag(2) = big * RealScalar(0.9);
  subdiag(0) = big * RealScalar(0.01);
  subdiag(1) = big * RealScalar(0.02);
  // Zero subdiagonal decouples the two blocks.
  subdiag(2) = RealScalar(0);
  // Second block: [3..5], O(1) scale.
  diag(3) = RealScalar(1);
  diag(4) = RealScalar(2);
  diag(5) = RealScalar(3);
  subdiag(3) = RealScalar(0.5);
  subdiag(4) = RealScalar(0.3);
  // Build the full tridiagonal matrix for residual checking.
  MatrixType T = MatrixType::Zero(n, n);
  T.diagonal() = diag;
  T.template diagonal<1>() = subdiag;
  T.template diagonal<-1>() = subdiag;
  SelfAdjointEigenSolver<MatrixType> eig;
  eig.computeFromTridiagonal(diag, subdiag, ComputeEigenvectors);
  VERIFY_IS_EQUAL(eig.info(), Success);
  // Eigenvalues must be sorted.
  for (Index i = 1; i < n; ++i) {
    VERIFY(eig.eigenvalues()(i) >= eig.eigenvalues()(i - 1));
  }
  // Eigenvectors must be orthonormal.
  RealScalar unitary_tol = RealScalar(4) * RealScalar(n) * NumTraits<RealScalar>::epsilon();
  VERIFY(eig.eigenvectors().isUnitary(unitary_tol));
  // Full residual check in scaled coordinates.
  // Dividing by the max entry avoids overflow when forming T * V.
  RealScalar Tnorm = T.cwiseAbs().maxCoeff();
  MatrixType Tscaled = T / Tnorm;
  MatrixType residual = Tscaled * eig.eigenvectors() - eig.eigenvectors() * (eig.eigenvalues() / Tnorm).asDiagonal();
  RealScalar rel_err = residual.norm() / Tscaled.norm();
  VERIFY(rel_err <= RealScalar(8) * RealScalar(n) * NumTraits<RealScalar>::epsilon());
  // The small eigenvalues (~1,2,3) must be accurate, not lost to underflow.
  // With global scaling to [-1,1], dividing by 'big' would underflow these to zero.
  // Verify the small eigenvalues are within O(eps) of their true values.
  // The small block is exactly [[1, 0.5, 0], [0.5, 2, 0.3], [0, 0.3, 3]].
  MatrixType T_small(3, 3);
  T_small << RealScalar(1), RealScalar(0.5), RealScalar(0), RealScalar(0.5), RealScalar(2), RealScalar(0.3),
      RealScalar(0), RealScalar(0.3), RealScalar(3);
  SelfAdjointEigenSolver<MatrixType> eig_small(T_small);
  VectorType small_evals = eig_small.eigenvalues();
  // Find the 3 smallest eigenvalues from the combined solver (they should be sorted first).
  VectorType combined_small = eig.eigenvalues().head(3);
  VERIFY_IS_APPROX(combined_small, small_evals);
  // Eigenvalues-only mode must agree.
  SelfAdjointEigenSolver<MatrixType> eig_vals;
  eig_vals.computeFromTridiagonal(diag, subdiag, EigenvaluesOnly);
  VERIFY_IS_EQUAL(eig_vals.info(), Success);
  VERIFY_IS_APPROX(eig.eigenvalues() / Tnorm, eig_vals.eigenvalues() / Tnorm);
}
// Test computeFromTridiagonal with structured hard-case matrices from the literature.
// For every generator/size pair supplied by for_all_symmetric_tridiag_test_matrices
// (Wilkinson, Clement, Kahan, graded, glued-block, ... matrices), check convergence,
// sorted eigenvalues, eigenvector orthonormality, relative residuals, and agreement
// between the two computation modes.
template <typename RealScalar>
void selfadjointeigensolver_structured_tridiagonal() {
  typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixType;
  test::for_all_symmetric_tridiag_test_matrices<RealScalar>([](const auto& diag, const auto& offdiag) {
    Index n = diag.size();
    // Build the full symmetric tridiagonal matrix for residual checking.
    MatrixType T = MatrixType::Zero(n, n);
    T.diagonal() = diag;
    if (n > 1) {
      T.template diagonal<1>() = offdiag;
      T.template diagonal<-1>() = offdiag;
    }
    RealScalar Tnorm = T.cwiseAbs().maxCoeff();
    // Test with eigenvectors.
    SelfAdjointEigenSolver<MatrixType> eig;
    eig.computeFromTridiagonal(diag, offdiag, ComputeEigenvectors);
    VERIFY_IS_EQUAL(eig.info(), Success);
    // Eigenvalues must be sorted.
    for (Index i = 1; i < n; ++i) {
      VERIFY(eig.eigenvalues()(i) >= eig.eigenvalues()(i - 1));
    }
    // Eigenvectors must be orthonormal.
    RealScalar unitary_tol =
        numext::maxi(RealScalar(4) * RealScalar(n) * NumTraits<RealScalar>::epsilon(), test_precision<RealScalar>());
    VERIFY(eig.eigenvectors().isUnitary(unitary_tol));
    // Residual check: ||T*V - V*D||_F / ||T||_max should be O(n*eps).
    // Scale T to avoid overflow in the matrix product when entries span extreme ranges.
    // (Skipped entirely for the all-zero matrix, where Tnorm vanishes.)
    if (Tnorm > (std::numeric_limits<RealScalar>::min)()) {
      MatrixType Tscaled = T / Tnorm;
      MatrixType residual =
          Tscaled * eig.eigenvectors() - eig.eigenvectors() * (eig.eigenvalues() / Tnorm).asDiagonal();
      RealScalar rel_err = residual.norm() / Tscaled.norm();
      VERIFY(rel_err <= RealScalar(8) * RealScalar(n) * NumTraits<RealScalar>::epsilon());
    }
    // Eigenvalues-only mode must produce the same eigenvalues.
    SelfAdjointEigenSolver<MatrixType> eig_vals;
    eig_vals.computeFromTridiagonal(diag, offdiag, EigenvaluesOnly);
    VERIFY_IS_EQUAL(eig_vals.info(), Success);
    if (Tnorm > (std::numeric_limits<RealScalar>::min)()) {
      VERIFY_IS_APPROX(eig.eigenvalues() / Tnorm, eig_vals.eigenvalues() / Tnorm);
    }
  });
}
// Test with diagonal matrices (tridiagonalization is trivial).
template <typename MatrixType>
void selfadjointeigensolver_diagonal(const MatrixType& m) {
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real RealScalar;
Index n = m.rows();
// Random diagonal matrix.
MatrixType diag = MatrixType::Zero(n, n);
for (Index i = 0; i < n; ++i) {
diag(i, i) = internal::random<RealScalar>(-100, 100);
}
selfadjointeigensolver_essential_check(diag);
// The eigenvalues should be the diagonal entries, sorted.
SelfAdjointEigenSolver<MatrixType> eig(diag);
VERIFY_IS_EQUAL(eig.info(), Success);
Matrix<RealScalar, Dynamic, 1> expected_evals(n);
for (Index i = 0; i < n; ++i) expected_evals(i) = numext::real(diag(i, i));
std::sort(expected_evals.data(), expected_evals.data() + n);
VERIFY_IS_APPROX(eig.eigenvalues(), expected_evals);
}
// Test operatorInverseSqrt more thoroughly.
template <typename MatrixType>
void selfadjointeigensolver_inverse_sqrt(const MatrixType& m) {
Index n = m.rows();
if (n < 1) return;
// Create a positive-definite matrix.
MatrixType a = MatrixType::Random(n, n);
MatrixType spd = a.adjoint() * a + MatrixType::Identity(n, n);
spd.template triangularView<StrictlyUpper>().setZero();
SelfAdjointEigenSolver<MatrixType> eig(spd);
VERIFY_IS_EQUAL(eig.info(), Success);
MatrixType sqrtA = eig.operatorSqrt();
MatrixType invSqrtA = eig.operatorInverseSqrt();
// sqrtA * invSqrtA should be identity.
VERIFY_IS_APPROX(sqrtA * invSqrtA, MatrixType::Identity(n, n));
// invSqrtA * A * invSqrtA should be identity.
VERIFY_IS_APPROX(invSqrtA * spd.template selfadjointView<Lower>() * invSqrtA, MatrixType::Identity(n, n));
// invSqrtA should be symmetric/selfadjoint.
VERIFY_IS_APPROX(invSqrtA, invSqrtA.adjoint());
}
// Test that RowMajor matrices work correctly with computeDirect.
template <int>
void selfadjointeigensolver_rowmajor() {
typedef Matrix<double, 3, 3, RowMajor> RowMajorMatrix3d;
typedef Matrix<double, 2, 2, RowMajor> RowMajorMatrix2d;
typedef Matrix<float, 3, 3, RowMajor> RowMajorMatrix3f;
typedef Matrix<float, 2, 2, RowMajor> RowMajorMatrix2f;
// 3x3 RowMajor double
{
RowMajorMatrix3d a = RowMajorMatrix3d::Random();
RowMajorMatrix3d symmA = a.transpose() * a;
SelfAdjointEigenSolver<RowMajorMatrix3d> eig;
eig.computeDirect(symmA);
VERIFY_IS_EQUAL(eig.info(), Success);
// Compare with iterative solver.
SelfAdjointEigenSolver<RowMajorMatrix3d> eigRef(symmA);
VERIFY_IS_APPROX(eigRef.eigenvalues(), eig.eigenvalues());
}
// 2x2 RowMajor double
{
RowMajorMatrix2d a = RowMajorMatrix2d::Random();
RowMajorMatrix2d symmA = a.transpose() * a;
SelfAdjointEigenSolver<RowMajorMatrix2d> eig;
eig.computeDirect(symmA);
VERIFY_IS_EQUAL(eig.info(), Success);
SelfAdjointEigenSolver<RowMajorMatrix2d> eigRef(symmA);
VERIFY_IS_APPROX(eigRef.eigenvalues(), eig.eigenvalues());
}
// 3x3 RowMajor float
{
RowMajorMatrix3f a = RowMajorMatrix3f::Random();
RowMajorMatrix3f symmA = a.transpose() * a;
SelfAdjointEigenSolver<RowMajorMatrix3f> eig;
eig.computeDirect(symmA);
VERIFY_IS_EQUAL(eig.info(), Success);
SelfAdjointEigenSolver<RowMajorMatrix3f> eigRef(symmA);
VERIFY_IS_APPROX(eigRef.eigenvalues(), eig.eigenvalues());
}
// 2x2 RowMajor float
{
RowMajorMatrix2f a = RowMajorMatrix2f::Random();
RowMajorMatrix2f symmA = a.transpose() * a;
SelfAdjointEigenSolver<RowMajorMatrix2f> eig;
eig.computeDirect(symmA);
VERIFY_IS_EQUAL(eig.info(), Success);
SelfAdjointEigenSolver<RowMajorMatrix2f> eigRef(symmA);
VERIFY_IS_APPROX(eigRef.eigenvalues(), eig.eigenvalues());
}
// Dynamic RowMajor with iterative solver
{
typedef Matrix<double, Dynamic, Dynamic, RowMajor> RowMajorMatrixXd;
int s = internal::random<int>(2, 20);
RowMajorMatrixXd a = RowMajorMatrixXd::Random(s, s);
RowMajorMatrixXd symmA = a.transpose() * a;
SelfAdjointEigenSolver<RowMajorMatrixXd> eig(symmA);
VERIFY_IS_EQUAL(eig.info(), Success);
double scaling = symmA.cwiseAbs().maxCoeff();
if (scaling > (std::numeric_limits<double>::min)()) {
VERIFY_IS_APPROX((symmA.template selfadjointView<Lower>() * eig.eigenvectors()) / scaling,
(eig.eigenvectors() * eig.eigenvalues().asDiagonal()) / scaling);
}
}
}
// Test matrix with Inf entries returns NoConvergence (similar to NaN test).
template <int>
void selfadjointeigensolver_inf() {
Matrix3d m;
m.setRandom();
m = m * m.transpose();
m(1, 1) = std::numeric_limits<double>::infinity();
SelfAdjointEigenSolver<Matrix3d> eig(m);
VERIFY_IS_EQUAL(eig.info(), NoConvergence);
}
template <int>
void bug_854() {
Matrix3d m;
@@ -227,11 +709,160 @@ void bug_1225() {
VERIFY_IS_APPROX(eig1.eigenvalues(), eig2.eigenvalues());
}
// Verify that non-finite inputs are detected for all sizes, including 1x1.
template <int>
void selfadjointeigensolver_nonfinite() {
const double inf = std::numeric_limits<double>::infinity();
const double nan = std::numeric_limits<double>::quiet_NaN();
// 1x1 Inf.
{
Matrix<double, 1, 1> m;
m << inf;
SelfAdjointEigenSolver<Matrix<double, 1, 1>> eig(m);
VERIFY_IS_EQUAL(eig.info(), NoConvergence);
}
// 1x1 NaN.
{
Matrix<double, 1, 1> m;
m << nan;
SelfAdjointEigenSolver<Matrix<double, 1, 1>> eig(m);
VERIFY_IS_EQUAL(eig.info(), NoConvergence);
}
// 1x1 -Inf.
{
Matrix<double, 1, 1> m;
m << -inf;
SelfAdjointEigenSolver<Matrix<double, 1, 1>> eig(m);
VERIFY_IS_EQUAL(eig.info(), NoConvergence);
}
// 3x3 with Inf.
{
Matrix3d m = Matrix3d::Identity();
m(1, 1) = inf;
SelfAdjointEigenSolver<Matrix3d> eig(m);
VERIFY_IS_EQUAL(eig.info(), NoConvergence);
}
// 3x3 with NaN.
{
Matrix3d m = Matrix3d::Identity();
m(0, 1) = m(1, 0) = nan;
SelfAdjointEigenSolver<Matrix3d> eig(m);
VERIFY_IS_EQUAL(eig.info(), NoConvergence);
}
// Dynamic size with Inf.
{
MatrixXd m = MatrixXd::Identity(5, 5);
m(3, 3) = inf;
SelfAdjointEigenSolver<MatrixXd> eig(m);
VERIFY_IS_EQUAL(eig.info(), NoConvergence);
}
}
// Regression test for bug 1204: SelfAdjointEigenSolver must be instantiable
// with a sparse matrix type (the input is densified internally). The block
// previously contained two declarations of 'eig' (an artifact of the old and
// new lines of a diff both being present), which is a redefinition error;
// keep the single modern declaration.
template <int>
void bug_1204() {
  SparseMatrix<double> A(2, 2);
  A.setIdentity();
  SelfAdjointEigenSolver<Eigen::SparseMatrix<double>> eig(A);
}
template <int>
void selfadjointeigensolver_tridiagonal_zerosized() {
SelfAdjointEigenSolver<MatrixXd> eig;
VectorXd diag(0), subdiag(0);
eig.computeFromTridiagonal(diag, subdiag, EigenvaluesOnly);
VERIFY_IS_EQUAL(eig.info(), Success);
VERIFY_IS_EQUAL(eig.eigenvalues().size(), 0);
VERIFY_RAISES_ASSERT(eig.eigenvectors());
eig.computeFromTridiagonal(diag, subdiag, ComputeEigenvectors);
VERIFY_IS_EQUAL(eig.info(), Success);
VERIFY_IS_EQUAL(eig.eigenvalues().size(), 0);
VERIFY_IS_EQUAL(eig.eigenvectors().rows(), 0);
VERIFY_IS_EQUAL(eig.eigenvectors().cols(), 0);
}
// Specific 3x3 test cases that stress the direct solver.
template <int>
void direct_3x3_stress() {
// Near-planar point cloud covariance: two large eigenvalues, one near-zero.
{
Matrix3d m;
m << 100, 50, 0.001, 50, 100, 0.002, 0.001, 0.002, 1e-10;
selfadjointeigensolver_essential_check(m);
}
// All equal diagonal entries (triple eigenvalue).
{
Matrix3d m = Matrix3d::Identity() * 7.0;
selfadjointeigensolver_essential_check(m);
}
// Two exactly equal eigenvalues (from explicit construction).
{
Matrix3d q;
q << 1, 0, 0, 0, 1.0 / std::sqrt(2.0), 1.0 / std::sqrt(2.0), 0, -1.0 / std::sqrt(2.0), 1.0 / std::sqrt(2.0);
Vector3d d(1.0, 5.0, 5.0);
Matrix3d m = q * d.asDiagonal() * q.transpose();
selfadjointeigensolver_essential_check(m);
}
// Large off-diagonal relative to diagonal.
{
Matrix3d m;
m << 1, 1000, 1000, 1000, 1, 1000, 1000, 1000, 1;
selfadjointeigensolver_essential_check(m);
}
// Nearly singular: one eigenvalue much smaller than others.
{
Matrix3d m;
m << 1, 0.5, 0.3, 0.5, 1, 0.4, 0.3, 0.4, 1;
m *= 1e15;
Matrix3d perturbation = Matrix3d::Zero();
perturbation(0, 0) = 1e-15;
m += perturbation;
selfadjointeigensolver_essential_check(m);
}
}
// Specific 2x2 test cases that stress the direct solver.
template <int>
void direct_2x2_stress() {
// Equal eigenvalues.
{
Matrix2d m = Matrix2d::Identity() * 42.0;
selfadjointeigensolver_essential_check(m);
}
// Very small off-diagonal.
{
Matrix2d m;
m << 1.0, 1e-15, 1e-15, 1.0;
selfadjointeigensolver_essential_check(m);
}
// Huge ratio between diagonal entries.
{
Matrix2d m;
m << 1e100, 0, 0, 1e-100;
selfadjointeigensolver_essential_check(m);
}
// Anti-diagonal dominant.
{
Matrix2d m;
m << 0, 1e10, 1e10, 0;
selfadjointeigensolver_essential_check(m);
}
// Negative entries.
{
Matrix2d m;
m << -5.0, 3.0, 3.0, -5.0;
selfadjointeigensolver_essential_check(m);
}
}
EIGEN_DECLARE_TEST(eigensolver_selfadjoint) {
@@ -266,12 +897,62 @@ EIGEN_DECLARE_TEST(eigensolver_selfadjoint) {
CALL_SUBTEST_5(selfadjointeigensolver(MatrixXcd(2, 2)));
CALL_SUBTEST_6(selfadjointeigensolver(Matrix<double, 1, 1>()));
CALL_SUBTEST_7(selfadjointeigensolver(Matrix<double, 2, 2>()));
// repeated eigenvalues
CALL_SUBTEST_17(selfadjointeigensolver_repeated_eigenvalues(Matrix3d()));
CALL_SUBTEST_15(selfadjointeigensolver_repeated_eigenvalues(Matrix2d()));
CALL_SUBTEST_2(selfadjointeigensolver_repeated_eigenvalues(Matrix4d()));
CALL_SUBTEST_4(selfadjointeigensolver_repeated_eigenvalues(MatrixXd(s, s)));
CALL_SUBTEST_13(selfadjointeigensolver_repeated_eigenvalues(Matrix3f()));
CALL_SUBTEST_12(selfadjointeigensolver_repeated_eigenvalues(Matrix2f()));
CALL_SUBTEST_18(selfadjointeigensolver_repeated_eigenvalues(Matrix3cd()));
// extreme eigenvalues (near overflow/underflow, high condition number)
CALL_SUBTEST_17(selfadjointeigensolver_extreme_eigenvalues(Matrix3d()));
CALL_SUBTEST_2(selfadjointeigensolver_extreme_eigenvalues(Matrix4d()));
CALL_SUBTEST_4(selfadjointeigensolver_extreme_eigenvalues(MatrixXd(s, s)));
CALL_SUBTEST_13(selfadjointeigensolver_extreme_eigenvalues(Matrix3f()));
CALL_SUBTEST_3(selfadjointeigensolver_extreme_eigenvalues(MatrixXf(s, s)));
// computeFromTridiagonal with scaled inputs
CALL_SUBTEST_4(selfadjointeigensolver_tridiagonal_scaled(MatrixXd(s, s)));
CALL_SUBTEST_3(selfadjointeigensolver_tridiagonal_scaled(MatrixXf(s, s)));
// structured tridiagonal hard cases from the literature
CALL_SUBTEST_4(selfadjointeigensolver_structured_tridiagonal<double>());
CALL_SUBTEST_3(selfadjointeigensolver_structured_tridiagonal<float>());
// wide dynamic range tridiagonal (per-block scaling regression)
CALL_SUBTEST_4(selfadjointeigensolver_tridiagonal_wide_range<double>());
CALL_SUBTEST_3(selfadjointeigensolver_tridiagonal_wide_range<float>());
// diagonal matrices
CALL_SUBTEST_17(selfadjointeigensolver_diagonal(Matrix3d()));
CALL_SUBTEST_4(selfadjointeigensolver_diagonal(MatrixXd(s, s)));
// operatorInverseSqrt
CALL_SUBTEST_17(selfadjointeigensolver_inverse_sqrt(Matrix3d()));
CALL_SUBTEST_2(selfadjointeigensolver_inverse_sqrt(Matrix4d()));
CALL_SUBTEST_4(selfadjointeigensolver_inverse_sqrt(MatrixXd(s, s)));
CALL_SUBTEST_13(selfadjointeigensolver_inverse_sqrt(Matrix3f()));
// RowMajor
CALL_SUBTEST_19(selfadjointeigensolver_rowmajor<0>());
}
CALL_SUBTEST_17(bug_854<0>());
CALL_SUBTEST_17(bug_1014<0>());
CALL_SUBTEST_17(bug_1204<0>());
CALL_SUBTEST_17(bug_1225<0>());
CALL_SUBTEST_17(selfadjointeigensolver_nonfinite<0>());
CALL_SUBTEST_8(selfadjointeigensolver_tridiagonal_zerosized<0>());
// Stress tests for direct 3x3 and 2x2 solvers.
CALL_SUBTEST_17(direct_3x3_stress<0>());
CALL_SUBTEST_15(direct_2x2_stress<0>());
// Test Inf input handling.
CALL_SUBTEST_17(selfadjointeigensolver_inf<0>());
// Test problem size constructors
s = internal::random<int>(1, EIGEN_TEST_MAX_SIZE / 4);

View File

@@ -0,0 +1,383 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2025 Rasmus Munk Larsen <rmlarsen@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_TEST_TRIDIAG_TEST_MATRICES_H
#define EIGEN_TEST_TRIDIAG_TEST_MATRICES_H
// Structured tridiagonal test matrices from the numerical linear algebra
// literature. Used by both the bidiagonal SVD and symmetric eigenvalue tests.
//
// Each generator writes into pre-allocated (diag, offdiag) vectors.
// For SVD, offdiag is the superdiagonal of a bidiagonal matrix.
// For eigenvalues, offdiag is the subdiagonal of a symmetric tridiagonal matrix.
//
// Usage:
// Matrix<RealScalar, Dynamic, 1> diag(n), offdiag(n-1);
// tridiag_identity(diag, offdiag); // fills diag and offdiag
// my_verify(diag, offdiag); // solver-specific verification
#include <Eigen/Core>
namespace Eigen {
namespace test {
// 1. Identity matrix: unit diagonal, no coupling.
template <typename VectorType>
void tridiag_identity(VectorType& diag, VectorType& offdiag) {
  typedef typename VectorType::Scalar Scalar;
  diag.fill(Scalar(1));
  offdiag.fill(Scalar(0));
}
// 2. Zero matrix: every stored entry vanishes.
template <typename VectorType>
void tridiag_zero(VectorType& diag, VectorType& offdiag) {
  typedef typename VectorType::Scalar Scalar;
  diag.fill(Scalar(0));
  offdiag.fill(Scalar(0));
}
// 3. Constant matrix: all stored entries equal to c.
template <typename VectorType>
void tridiag_constant(VectorType& diag, VectorType& offdiag,
                      typename VectorType::Scalar c = typename VectorType::Scalar(2.5)) {
  diag.fill(c);
  offdiag.fill(c);
}
// 4. 1-2-1 Toeplitz matrix.
// Exact eigenvalues: 2 - 2*cos(k*pi/(n+1)) for k=1,...,n.
template <typename VectorType>
void tridiag_1_2_1(VectorType& diag, VectorType& offdiag) {
  typedef typename VectorType::Scalar Scalar;
  diag.fill(Scalar(2));
  offdiag.fill(Scalar(1));
}
// 5. Wilkinson W_{2m+1}: d_i = |m - i|, e=[1,...,1]
// Has pairs of eigenvalues agreeing to many digits; stresses deflation.
template <typename VectorType>
void tridiag_wilkinson(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
for (Index i = 0; i < n; ++i) diag(i) = numext::abs(Scalar(n / 2) - Scalar(i));
offdiag.setOnes();
}
// 6. Clement matrix: d=[0,...,0], e_i = sqrt(i*(n-1-i))
// Known eigenvalues: -(n-1), -(n-3), ..., (n-3), (n-1)
template <typename VectorType>
void tridiag_clement(VectorType& diag, VectorType& offdiag) {
EIGEN_USING_STD(sqrt);
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
diag.setZero();
for (Index i = 0; i < n - 1; ++i) offdiag(i) = sqrt(Scalar(i + 1) * Scalar(n - 1 - i));
}
// 7. Kahan-style: d_i = s^i, e_i = -c*s^i with s=sin(theta), c=cos(theta).
// Geometric decay with controlled condition number.
template <typename VectorType>
void tridiag_kahan(VectorType& diag, VectorType& offdiag,
typename VectorType::Scalar theta = typename VectorType::Scalar(0.3)) {
EIGEN_USING_STD(sin);
EIGEN_USING_STD(cos);
EIGEN_USING_STD(pow);
EIGEN_USING_STD(log);
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
const Scalar eps = NumTraits<Scalar>::epsilon();
const Scalar s = sin(theta);
const Scalar c = cos(theta);
const Scalar maxPower = -log(eps) / (-log(s));
for (Index i = 0; i < n; ++i) diag(i) = pow(s, numext::mini(Scalar(i), maxPower));
for (Index i = 0; i < n - 1; ++i) offdiag(i) = -c * pow(s, numext::mini(Scalar(i), maxPower));
}
// 8. Graded: d_i = base^(-i), e_i = base^(-i)
template <typename VectorType>
void tridiag_graded(VectorType& diag, VectorType& offdiag,
typename VectorType::Scalar base = typename VectorType::Scalar(10)) {
EIGEN_USING_STD(pow);
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
for (Index i = 0; i < n; ++i) diag(i) = pow(base, -Scalar(i));
for (Index i = 0; i < n - 1; ++i) offdiag(i) = pow(base, -Scalar(i));
}
// 9. Geometric decay diagonal: d_i = base^i, e=[0,...,0]
template <typename VectorType>
void tridiag_geometric_diagonal(VectorType& diag, VectorType& offdiag,
typename VectorType::Scalar base = typename VectorType::Scalar(0.5)) {
EIGEN_USING_STD(pow);
EIGEN_USING_STD(log);
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
const Scalar eps = NumTraits<Scalar>::epsilon();
const Scalar maxPower = -log(eps) / (-log(base));
for (Index i = 0; i < n; ++i) diag(i) = pow(base, numext::mini(Scalar(i), maxPower));
offdiag.setZero();
}
// 10. Geometric decay offdiagonal: d=[1,...,1], e_i = base^i
template <typename VectorType>
void tridiag_geometric_offdiag(VectorType& diag, VectorType& offdiag,
typename VectorType::Scalar base = typename VectorType::Scalar(0.5)) {
EIGEN_USING_STD(pow);
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
diag.setOnes();
for (Index i = 0; i < n - 1; ++i) offdiag(i) = pow(base, Scalar(i));
}
// 11. Clustered eigenvalues: d_i = 1 + i*eps, e=[0,...,0]
template <typename VectorType>
void tridiag_clustered(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
const Scalar eps = NumTraits<Scalar>::epsilon();
for (Index i = 0; i < n; ++i) diag(i) = Scalar(1) + Scalar(i) * eps;
offdiag.setZero();
}
// 12. Two clusters: half at 1, half at eps.
template <typename VectorType>
void tridiag_two_clusters(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
const Scalar eps = NumTraits<Scalar>::epsilon();
for (Index i = 0; i < n; ++i) diag(i) = (i < n / 2) ? Scalar(1) : eps;
offdiag.setZero();
}
// 13. One tiny trailing diagonal entry with eps^2 coupling everywhere.
template <typename VectorType>
void tridiag_single_tiny(VectorType& diag, VectorType& offdiag) {
  typedef typename VectorType::Scalar Scalar;
  const Scalar eps = NumTraits<Scalar>::epsilon();
  diag.fill(Scalar(1));
  diag(diag.size() - 1) = eps;
  offdiag.fill(eps * eps);
}
// 14. Overflow/underflow: alternating big/tiny diagonal and offdiagonal.
template <typename VectorType>
void tridiag_overflow_underflow(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
const Scalar big = (std::numeric_limits<Scalar>::max)() / Scalar(1000);
const Scalar tiny = (std::numeric_limits<Scalar>::min)() * Scalar(1000);
for (Index i = 0; i < n; ++i) diag(i) = (i % 2 == 0) ? big : tiny;
for (Index i = 0; i < n - 1; ++i) offdiag(i) = (i % 2 == 0) ? tiny : big;
}
// 15. Prescribed condition number: d_i = kappa^(-i/(n-1)) with kappa = 1/eps,
// plus an off-diagonal random perturbation of size O(eps).
// Fixed: for n == 1 the exponent was -0/0 = NaN (IEEE), so pow() filled the
// diagonal with NaN; the size lists used with these generators include n = 1.
template <typename VectorType>
void tridiag_prescribed_cond(VectorType& diag, VectorType& offdiag) {
  EIGEN_USING_STD(pow);
  EIGEN_USING_STD(abs);
  typedef typename VectorType::Scalar Scalar;
  Index n = diag.size();
  const Scalar eps = NumTraits<Scalar>::epsilon();
  const Scalar kappa = Scalar(1) / eps;
  if (n < 2) {
    // A 1x1 (or empty) matrix has condition number 1; avoid dividing by n-1 == 0.
    diag.setOnes();
    return;
  }
  for (Index i = 0; i < n; ++i) diag(i) = pow(kappa, -Scalar(i) / Scalar(n - 1));
  for (Index i = 0; i < n - 1; ++i) offdiag(i) = eps * abs(internal::random<Scalar>());
}
// 16. Rank-deficient: d=[1,..,0,..,0,..,1], e=[0,...,0]
template <typename VectorType>
void tridiag_rank_deficient(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
for (Index i = 0; i < n; ++i) diag(i) = (i < n / 3 || i >= 2 * n / 3) ? Scalar(1) : Scalar(0);
offdiag.setZero();
}
// 17. Arrowhead-like: d_i = linspace(1,n), e_i = 1/(i+1)
template <typename VectorType>
void tridiag_arrowhead(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
for (Index i = 0; i < n; ++i) diag(i) = Scalar(1) + Scalar(i);
for (Index i = 0; i < n - 1; ++i) offdiag(i) = Scalar(1) / Scalar(i + 1);
}
// 18. Repeated values: d=[1,2,3,1,2,3,...], e=[0,...,0]
template <typename VectorType>
void tridiag_repeated(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
for (Index i = 0; i < n; ++i) diag(i) = Scalar((i % 3) + 1);
offdiag.setZero();
}
// 19. Glued blocks: d=[1,...,1], e=0 except e[n/2-1]=eps.
// Two identity blocks coupled by a tiny off-diagonal entry.
template <typename VectorType>
void tridiag_glued(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
diag.setOnes();
offdiag.setZero();
if (n > 2) offdiag(n / 2 - 1) = NumTraits<Scalar>::epsilon();
}
// 20. Nearly diagonal: random diag, eps * random offdiag.
template <typename VectorType>
void tridiag_nearly_diagonal(VectorType& diag, VectorType& offdiag) {
EIGEN_USING_STD(abs);
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
const Scalar eps = NumTraits<Scalar>::epsilon();
diag = VectorType::Random(n).cwiseAbs() + VectorType::Constant(n, Scalar(0.1));
for (Index i = 0; i < n - 1; ++i) offdiag(i) = eps * (Scalar(0.5) + abs(internal::random<Scalar>()));
}
// 21. Negative eigenvalues: d_i = -i, e=[1,...,1]
// (Only meaningful for symmetric eigenvalue problems, not SVD.)
template <typename VectorType>
void tridiag_negative(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
for (Index i = 0; i < n; ++i) diag(i) = -Scalar(i + 1);
offdiag.setOnes();
}
// 22. Mixed sign diagonal: d_i = (-1)^i * (i+1), e=[1,...,1]
// (Only meaningful for symmetric eigenvalue problems, not SVD.)
template <typename VectorType>
void tridiag_mixed_sign(VectorType& diag, VectorType& offdiag) {
typedef typename VectorType::Scalar Scalar;
Index n = diag.size();
for (Index i = 0; i < n; ++i) diag(i) = ((i % 2 == 0) ? Scalar(1) : Scalar(-1)) * Scalar(i + 1);
offdiag.setOnes();
}
// Helper: invoke `func` once per test size with freshly sized (diag, offdiag)
// vectors; `func` is expected to fill them with a generator and then run its
// own verification.
//
// Usage:
//   for_tridiag_sizes([](auto& diag, auto& offdiag) {
//     tridiag_wilkinson(diag, offdiag);
//     my_verify(diag, offdiag);
//   });
template <typename Scalar, typename Func>
void for_tridiag_sizes(Func&& func) {
  typedef Matrix<Scalar, Dynamic, 1> VectorType;
  const int sizes[] = {1, 2, 3, 5, 10, 16, 20, 50, 100};
  for (int sz : sizes) {
    const Index n = sz;
    // offdiag has n - 1 entries; a 1x1 matrix has none.
    VectorType diag(n), offdiag(n > 1 ? n - 1 : 0);
    func(diag, offdiag);
  }
}
// Helper: run all generators (suitable for both SVD and eigenvalue problems).
// For every test size, each generator fills (diag, offdiag) and the pair is
// immediately handed to `verify`.
template <typename Scalar, typename Func>
void for_all_tridiag_test_matrices(Func&& verify) {
  typedef Matrix<Scalar, Dynamic, 1> VectorType;
  const int sizes[] = {1, 2, 3, 5, 10, 16, 20, 50, 100};
  for (int sz : sizes) {
    const Index n = sz;
    VectorType diag(n), offdiag(n > 1 ? n - 1 : 0);
    // Fill the vectors with one generator, then immediately verify the result.
    auto run = [&](auto&& generate) {
      generate(diag, offdiag);
      verify(diag, offdiag);
    };
    run([](auto& d, auto& e) { tridiag_identity(d, e); });
    run([](auto& d, auto& e) { tridiag_zero(d, e); });
    run([](auto& d, auto& e) { tridiag_constant(d, e); });
    run([](auto& d, auto& e) { tridiag_1_2_1(d, e); });
    run([](auto& d, auto& e) { tridiag_wilkinson(d, e); });
    // The guarded generators below need at least one off-diagonal entry
    // (or divide by n - 1), so they are skipped for 1x1 matrices.
    if (n > 1) run([](auto& d, auto& e) { tridiag_clement(d, e); });
    run([](auto& d, auto& e) { tridiag_kahan(d, e); });
    run([](auto& d, auto& e) { tridiag_graded(d, e); });
    run([](auto& d, auto& e) { tridiag_geometric_diagonal(d, e); });
    if (n > 1) run([](auto& d, auto& e) { tridiag_geometric_offdiag(d, e); });
    run([](auto& d, auto& e) { tridiag_clustered(d, e); });
    run([](auto& d, auto& e) { tridiag_two_clusters(d, e); });
    run([](auto& d, auto& e) { tridiag_single_tiny(d, e); });
    run([](auto& d, auto& e) { tridiag_overflow_underflow(d, e); });
    if (n > 1) run([](auto& d, auto& e) { tridiag_prescribed_cond(d, e); });
    run([](auto& d, auto& e) { tridiag_rank_deficient(d, e); });
    run([](auto& d, auto& e) { tridiag_arrowhead(d, e); });
    run([](auto& d, auto& e) { tridiag_repeated(d, e); });
    run([](auto& d, auto& e) { tridiag_glued(d, e); });
    run([](auto& d, auto& e) { tridiag_nearly_diagonal(d, e); });
  }
}
// Helper: run all generators, including those that produce negative diagonal
// entries (suitable only for symmetric eigenvalue problems, not SVD).
template <typename Scalar, typename Func>
void for_all_symmetric_tridiag_test_matrices(Func&& verify) {
  // First exercise every sign-agnostic generator.
  for_all_tridiag_test_matrices<Scalar>(verify);
  // Then add the generators whose output is not valid SVD input.
  typedef Matrix<Scalar, Dynamic, 1> VectorType;
  const int sizes[] = {1, 2, 3, 5, 10, 16, 20, 50, 100};
  for (int sz : sizes) {
    const Index n = sz;
    VectorType diag(n), offdiag(n > 1 ? n - 1 : 0);
    auto run = [&](auto&& generate) {
      generate(diag, offdiag);
      verify(diag, offdiag);
    };
    run([](auto& d, auto& e) { tridiag_negative(d, e); });
    run([](auto& d, auto& e) { tridiag_mixed_sign(d, e); });
  }
}
} // namespace test
} // namespace Eigen
#endif // EIGEN_TEST_TRIDIAG_TEST_MATRICES_H