Add test coverage for matrix lpNorm, RowMajor partial reductions, selfadjoint boundaries

libeigen/eigen!2289

Co-authored-by: Rasmus Munk Larsen <rmlarsen@gmail.com>
This commit is contained in:
Rasmus Munk Larsen
2026-03-12 14:45:51 -07:00
parent 15cae83485
commit 356a9ba1da
3 changed files with 177 additions and 0 deletions

View File

@@ -273,6 +273,45 @@ void resize(const MatrixTraits& t) {
VERIFY(a1.size() == cols);
}
// Test lpNorm for matrices (not just vectors).
// lpNorm treats the matrix as a flat coefficient vector,
// so the reference is computed element-by-element.
template <typename MatrixType>
void lpNorm_matrix(const MatrixType& m) {
using std::abs;
using std::pow;
using std::sqrt;
typedef typename MatrixType::RealScalar RealScalar;
Index rows = m.rows();
Index cols = m.cols();
MatrixType u = MatrixType::Random(rows, cols);
// L1 norm
RealScalar ref_l1(0);
for (Index j = 0; j < cols; ++j)
for (Index i = 0; i < rows; ++i) ref_l1 += abs(u(i, j));
VERIFY_IS_APPROX(u.template lpNorm<1>(), ref_l1);
// L2 norm
RealScalar ref_l2_sq(0);
for (Index j = 0; j < cols; ++j)
for (Index i = 0; i < rows; ++i) ref_l2_sq += numext::abs2(u(i, j));
VERIFY_IS_APPROX(u.template lpNorm<2>(), sqrt(ref_l2_sq));
// L-Infinity norm
RealScalar ref_linf(0);
for (Index j = 0; j < cols; ++j)
for (Index i = 0; i < rows; ++i) ref_linf = (std::max)(ref_linf, abs(u(i, j)));
VERIFY_IS_APPROX(u.template lpNorm<Infinity>(), ref_linf);
// L5 norm
RealScalar ref_l5(0);
for (Index j = 0; j < cols; ++j)
for (Index i = 0; i < rows; ++i) ref_l5 += pow(abs(u(i, j)), RealScalar(5));
VERIFY_IS_APPROX(u.template lpNorm<5>(), pow(ref_l5, RealScalar(1) / RealScalar(5)));
}
template <int>
void regression_bug_654() {
ArrayXf a = RowVectorXf(3);
@@ -346,4 +385,17 @@ EIGEN_DECLARE_TEST(array_for_matrix) {
}
CALL_SUBTEST_6(regression_bug_654<0>());
CALL_SUBTEST_6(regrrssion_bug_1410<0>());
// lpNorm for matrices (not just vectors)
for (int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_9(lpNorm_matrix(Matrix2f()));
CALL_SUBTEST_9(lpNorm_matrix(Matrix4d()));
CALL_SUBTEST_9(lpNorm_matrix(
MatrixXf(internal::random<int>(1, EIGEN_TEST_MAX_SIZE), internal::random<int>(1, EIGEN_TEST_MAX_SIZE))));
CALL_SUBTEST_9(lpNorm_matrix(MatrixXcd(internal::random<int>(1, EIGEN_TEST_MAX_SIZE / 2),
internal::random<int>(1, EIGEN_TEST_MAX_SIZE / 2))));
}
// empty matrix
CALL_SUBTEST_9(lpNorm_matrix(MatrixXf(0, 0)));
CALL_SUBTEST_9(lpNorm_matrix(MatrixXf(0, 3)));
}

View File

@@ -63,6 +63,70 @@ void product_selfadjoint(const MatrixType& m) {
VERIFY_IS_APPROX(m1 * m4, m2.template selfadjointView<Lower>() * m4);
}
// Test selfadjoint products at deterministic blocking-boundary sizes.
// The existing product_selfadjoint test uses random sizes; this pins
// sizes at transitions (especially around the GEBP early-return
// threshold of 48) so the boundary code paths are always exercised.
template <int>
void product_selfadjoint_boundary() {
  typedef double Scalar;
  typedef Matrix<Scalar, Dynamic, Dynamic> Mat;
  typedef Matrix<Scalar, Dynamic, 1> Vec;
  const int sizes[] = {1, 2, 3, 4, 8, 16, 47, 48, 49, 64, 96, 128};
  // Range-for over the array itself so the loop bound can never fall
  // out of sync with the size list.
  for (int n : sizes) {
    Mat m1 = Mat::Random(n, n);
    m1 = (m1 + m1.transpose()).eval();  // make symmetric
    Vec v1 = Vec::Random(n);
    Mat rhs = Mat::Random(n, 5);
    // Lower selfadjointView * vector must match the full dense product.
    Mat m2 = m1.triangularView<Lower>();
    VERIFY_IS_APPROX(m2.selfadjointView<Lower>() * v1, m1 * v1);
    // Upper selfadjointView * vector.
    m2 = m1.triangularView<Upper>();
    VERIFY_IS_APPROX(m2.selfadjointView<Upper>() * v1, m1 * v1);
    // selfadjointView * matrix.
    m2 = m1.triangularView<Lower>();
    VERIFY_IS_APPROX(m2.selfadjointView<Lower>() * rhs, m1 * rhs);
    // rankUpdate: adds v1*v2^T + v2*v1^T to the stored lower triangle.
    Vec v2 = Vec::Random(n);
    m2 = m1.triangularView<Lower>();
    m2.selfadjointView<Lower>().rankUpdate(v1, v2);
    VERIFY_IS_APPROX(m2, (m1 + v1 * v2.transpose() + v2 * v1.transpose()).triangularView<Lower>().toDenseMatrix());
  }
}
// Same boundary-size test for a complex scalar (exercises the
// conjugation logic in the Hermitian product kernels).
template <int>
void product_selfadjoint_boundary_complex() {
  typedef std::complex<float> Scalar;
  typedef Matrix<Scalar, Dynamic, Dynamic> Mat;
  typedef Matrix<Scalar, Dynamic, 1> Vec;
  const int sizes[] = {1, 8, 47, 48, 49, 64};
  // Range-for over the array itself so the loop bound can never fall
  // out of sync with the size list.
  for (int n : sizes) {
    Mat m1 = Mat::Random(n, n);
    m1 = (m1 + m1.adjoint()).eval();                              // make Hermitian
    m1.diagonal() = m1.diagonal().real().template cast<Scalar>();  // real diagonal
    Vec v1 = Vec::Random(n);
    Mat rhs = Mat::Random(n, 3);
    // Lower Hermitian view: vector and matrix products.
    Mat m2 = m1.triangularView<Lower>();
    VERIFY_IS_APPROX(m2.selfadjointView<Lower>() * v1, m1 * v1);
    VERIFY_IS_APPROX(m2.selfadjointView<Lower>() * rhs, m1 * rhs);
    // Upper Hermitian view: vector product.
    m2 = m1.triangularView<Upper>();
    VERIFY_IS_APPROX(m2.selfadjointView<Upper>() * v1, m1 * v1);
  }
}
EIGEN_DECLARE_TEST(product_selfadjoint) {
int s = 0;
for (int i = 0; i < g_repeat; i++) {
@@ -86,4 +150,8 @@ EIGEN_DECLARE_TEST(product_selfadjoint) {
CALL_SUBTEST_7(product_selfadjoint(Matrix<float, Dynamic, Dynamic, RowMajor>(s, s)));
TEST_SET_BUT_UNUSED_VARIABLE(s)
}
// Deterministic blocking boundary tests (outside g_repeat).
CALL_SUBTEST_8(product_selfadjoint_boundary<0>());
CALL_SUBTEST_9(product_selfadjoint_boundary_complex<0>());
}

View File

@@ -321,6 +321,58 @@ void vectorwiseop_mixedscalar() {
VERIFY_IS_CWISE_EQUAL(c, d);
}
// Test partial reductions on RowMajor matrices.
// The existing tests only use ColMajor matrices.
template <typename Scalar>
void vectorwiseop_rowmajor() {
typedef Matrix<Scalar, Dynamic, Dynamic, RowMajor> RowMajorMatrix;
typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> ColMajorMatrix;
typedef typename NumTraits<Scalar>::Real RealScalar;
typedef Matrix<RealScalar, 1, Dynamic> RealRowVectorType;
typedef Matrix<RealScalar, Dynamic, 1> RealColVectorType;
const Index rows = 7;
const Index cols = 11;
ColMajorMatrix mc = ColMajorMatrix::Random(rows, cols);
RowMajorMatrix mr = mc; // same data, different storage
// Partial reductions should give the same result regardless of storage order.
VERIFY_IS_APPROX(mc.colwise().sum(), mr.colwise().sum());
VERIFY_IS_APPROX(mc.rowwise().sum(), mr.rowwise().sum());
VERIFY_IS_APPROX(mc.colwise().prod(), mr.colwise().prod());
VERIFY_IS_APPROX(mc.rowwise().prod(), mr.rowwise().prod());
VERIFY_IS_APPROX(mc.colwise().squaredNorm(), mr.colwise().squaredNorm());
VERIFY_IS_APPROX(mc.rowwise().squaredNorm(), mr.rowwise().squaredNorm());
VERIFY_IS_APPROX(mc.colwise().norm(), mr.colwise().norm());
VERIFY_IS_APPROX(mc.rowwise().norm(), mr.rowwise().norm());
RealRowVectorType rr_c, rr_r;
RealColVectorType rc_c, rc_r;
rr_c = mc.real().colwise().minCoeff();
rr_r = mr.real().colwise().minCoeff();
VERIFY_IS_APPROX(rr_c, rr_r);
rr_c = mc.real().colwise().maxCoeff();
rr_r = mr.real().colwise().maxCoeff();
VERIFY_IS_APPROX(rr_c, rr_r);
rc_c = mc.real().rowwise().minCoeff();
rc_r = mr.real().rowwise().minCoeff();
VERIFY_IS_APPROX(rc_c, rc_r);
rc_c = mc.real().rowwise().maxCoeff();
rc_r = mr.real().rowwise().maxCoeff();
VERIFY_IS_APPROX(rc_c, rc_r);
// Broadcast operations
typedef Matrix<Scalar, Dynamic, 1> ColVectorType;
typedef Matrix<Scalar, 1, Dynamic> RowVectorType;
ColVectorType cv = ColVectorType::Random(rows);
RowVectorType rv = RowVectorType::Random(cols);
VERIFY_IS_APPROX(ColMajorMatrix(mc.colwise() + cv), ColMajorMatrix(mr.colwise() + cv));
VERIFY_IS_APPROX(ColMajorMatrix(mc.rowwise() + rv), ColMajorMatrix(mr.rowwise() + rv));
VERIFY_IS_APPROX(ColMajorMatrix(mc.colwise() - cv), ColMajorMatrix(mr.colwise() - cv));
VERIFY_IS_APPROX(ColMajorMatrix(mc.rowwise() - rv), ColMajorMatrix(mr.rowwise() - rv));
}
EIGEN_DECLARE_TEST(vectorwiseop) {
CALL_SUBTEST_1(vectorwiseop_array(Array22cd()));
CALL_SUBTEST_2(vectorwiseop_array(Array<double, 3, 2>()));
@@ -336,4 +388,9 @@ EIGEN_DECLARE_TEST(vectorwiseop) {
CALL_SUBTEST_8(vectorwiseop_mixedscalar());
CALL_SUBTEST_9(vectorwiseop_array_integer(
ArrayXXi(internal::random<int>(1, EIGEN_TEST_MAX_SIZE), internal::random<int>(1, EIGEN_TEST_MAX_SIZE))));
// RowMajor partial reductions (deterministic, outside g_repeat).
CALL_SUBTEST_10(vectorwiseop_rowmajor<float>());
CALL_SUBTEST_10(vectorwiseop_rowmajor<double>());
CALL_SUBTEST_10(vectorwiseop_rowmajor<std::complex<float>>());
}