diff --git a/test/array_for_matrix.cpp b/test/array_for_matrix.cpp
index 42bc6a43e..f07243419 100644
--- a/test/array_for_matrix.cpp
+++ b/test/array_for_matrix.cpp
@@ -273,6 +273,45 @@ void resize(const MatrixTraits& t) {
   VERIFY(a1.size() == cols);
 }
 
+// Test lpNorm for matrices (not just vectors).
+// lpNorm treats the matrix as a flat coefficient vector,
+// so the reference is computed element-by-element.
+template <typename MatrixType>
+void lpNorm_matrix(const MatrixType& m) {
+  using std::abs;
+  using std::pow;
+  using std::sqrt;
+  typedef typename MatrixType::RealScalar RealScalar;
+
+  Index rows = m.rows();
+  Index cols = m.cols();
+  MatrixType u = MatrixType::Random(rows, cols);
+
+  // L1 norm
+  RealScalar ref_l1(0);
+  for (Index j = 0; j < cols; ++j)
+    for (Index i = 0; i < rows; ++i) ref_l1 += abs(u(i, j));
+  VERIFY_IS_APPROX(u.template lpNorm<1>(), ref_l1);
+
+  // L2 norm
+  RealScalar ref_l2_sq(0);
+  for (Index j = 0; j < cols; ++j)
+    for (Index i = 0; i < rows; ++i) ref_l2_sq += numext::abs2(u(i, j));
+  VERIFY_IS_APPROX(u.template lpNorm<2>(), sqrt(ref_l2_sq));
+
+  // L-Infinity norm
+  RealScalar ref_linf(0);
+  for (Index j = 0; j < cols; ++j)
+    for (Index i = 0; i < rows; ++i) ref_linf = (std::max)(ref_linf, abs(u(i, j)));
+  VERIFY_IS_APPROX(u.template lpNorm<Eigen::Infinity>(), ref_linf);
+
+  // L5 norm
+  RealScalar ref_l5(0);
+  for (Index j = 0; j < cols; ++j)
+    for (Index i = 0; i < rows; ++i) ref_l5 += pow(abs(u(i, j)), RealScalar(5));
+  VERIFY_IS_APPROX(u.template lpNorm<5>(), pow(ref_l5, RealScalar(1) / RealScalar(5)));
+}
+
 template <int>
 void regression_bug_654() {
   ArrayXf a = RowVectorXf(3);
@@ -346,4 +385,17 @@ EIGEN_DECLARE_TEST(array_for_matrix) {
   }
   CALL_SUBTEST_6(regression_bug_654<0>());
   CALL_SUBTEST_6(regrrssion_bug_1410<0>());
+
+  // lpNorm for matrices (not just vectors)
+  for (int i = 0; i < g_repeat; i++) {
+    CALL_SUBTEST_9(lpNorm_matrix(Matrix2f()));
+    CALL_SUBTEST_9(lpNorm_matrix(Matrix4d()));
+    CALL_SUBTEST_9(lpNorm_matrix(
+        MatrixXf(internal::random<int>(1, EIGEN_TEST_MAX_SIZE), internal::random<int>(1, EIGEN_TEST_MAX_SIZE))));
+    CALL_SUBTEST_9(lpNorm_matrix(MatrixXcd(internal::random<int>(1, EIGEN_TEST_MAX_SIZE / 2),
+                                           internal::random<int>(1, EIGEN_TEST_MAX_SIZE / 2))));
+  }
+  // empty matrix
+  CALL_SUBTEST_9(lpNorm_matrix(MatrixXf(0, 0)));
+  CALL_SUBTEST_9(lpNorm_matrix(MatrixXf(0, 3)));
 }
diff --git a/test/product_selfadjoint.cpp b/test/product_selfadjoint.cpp
index b61f30090..96c089f4a 100644
--- a/test/product_selfadjoint.cpp
+++ b/test/product_selfadjoint.cpp
@@ -63,6 +63,70 @@ void product_selfadjoint(const MatrixType& m) {
   VERIFY_IS_APPROX(m1 * m4, m2.template selfadjointView<Lower>() * m4);
 }
 
+// Test selfadjoint products at blocking boundary sizes.
+// The existing test uses random sizes; this tests deterministic sizes
+// at transitions (especially around the GEBP early-return threshold of 48).
+template <int>
+void product_selfadjoint_boundary() {
+  typedef double Scalar;
+  typedef Matrix<Scalar, Dynamic, Dynamic> Mat;
+  typedef Matrix<Scalar, Dynamic, 1> Vec;
+
+  const int sizes[] = {1, 2, 3, 4, 8, 16, 47, 48, 49, 64, 96, 128};
+  for (int si = 0; si < 12; ++si) {
+    int n = sizes[si];
+    Mat m1 = Mat::Random(n, n);
+    m1 = (m1 + m1.transpose()).eval();  // make symmetric
+
+    Vec v1 = Vec::Random(n);
+    Mat rhs = Mat::Random(n, 5);
+
+    // Lower selfadjointView * vector
+    Mat m2 = m1.triangularView<Lower>();
+    VERIFY_IS_APPROX(m2.selfadjointView<Lower>() * v1, m1 * v1);
+
+    // Upper selfadjointView * vector
+    m2 = m1.triangularView<Upper>();
+    VERIFY_IS_APPROX(m2.selfadjointView<Upper>() * v1, m1 * v1);
+
+    // selfadjointView * matrix
+    m2 = m1.triangularView<Lower>();
+    VERIFY_IS_APPROX(m2.selfadjointView<Lower>() * rhs, m1 * rhs);
+
+    // rankUpdate
+    Vec v2 = Vec::Random(n);
+    m2 = m1.triangularView<Lower>();
+    m2.selfadjointView<Lower>().rankUpdate(v1, v2);
+    VERIFY_IS_APPROX(m2, (m1 + v1 * v2.transpose() + v2 * v1.transpose()).triangularView<Lower>().toDenseMatrix());
+  }
+}
+
+// Same test for complex type (tests conjugation logic).
+template <int>
+void product_selfadjoint_boundary_complex() {
+  typedef std::complex<double> Scalar;
+  typedef Matrix<Scalar, Dynamic, Dynamic> Mat;
+  typedef Matrix<Scalar, Dynamic, 1> Vec;
+
+  const int sizes[] = {1, 8, 47, 48, 49, 64};
+  for (int si = 0; si < 6; ++si) {
+    int n = sizes[si];
+    Mat m1 = Mat::Random(n, n);
+    m1 = (m1 + m1.adjoint()).eval();  // make Hermitian
+    m1.diagonal() = m1.diagonal().real().template cast<Scalar>();  // real diagonal
+
+    Vec v1 = Vec::Random(n);
+    Mat rhs = Mat::Random(n, 3);
+
+    Mat m2 = m1.triangularView<Lower>();
+    VERIFY_IS_APPROX(m2.selfadjointView<Lower>() * v1, m1 * v1);
+    VERIFY_IS_APPROX(m2.selfadjointView<Lower>() * rhs, m1 * rhs);
+
+    m2 = m1.triangularView<Upper>();
+    VERIFY_IS_APPROX(m2.selfadjointView<Upper>() * v1, m1 * v1);
+  }
+}
+
 EIGEN_DECLARE_TEST(product_selfadjoint) {
   int s = 0;
   for (int i = 0; i < g_repeat; i++) {
@@ -86,4 +150,8 @@ EIGEN_DECLARE_TEST(product_selfadjoint) {
     CALL_SUBTEST_7(product_selfadjoint(Matrix<float, Dynamic, Dynamic, RowMajor>(s, s)));
     TEST_SET_BUT_UNUSED_VARIABLE(s)
   }
+
+  // Deterministic blocking boundary tests (outside g_repeat).
+  CALL_SUBTEST_8(product_selfadjoint_boundary<0>());
+  CALL_SUBTEST_9(product_selfadjoint_boundary_complex<0>());
 }
diff --git a/test/vectorwiseop.cpp b/test/vectorwiseop.cpp
index bccfe70cd..b30142d66 100644
--- a/test/vectorwiseop.cpp
+++ b/test/vectorwiseop.cpp
@@ -321,6 +321,58 @@ void vectorwiseop_mixedscalar() {
   VERIFY_IS_CWISE_EQUAL(c, d);
 }
 
+// Test partial reductions on RowMajor matrices.
+// The existing tests only use ColMajor matrices.
+template <typename Scalar>
+void vectorwiseop_rowmajor() {
+  typedef Matrix<Scalar, Dynamic, Dynamic, RowMajor> RowMajorMatrix;
+  typedef Matrix<Scalar, Dynamic, Dynamic> ColMajorMatrix;
+  typedef typename NumTraits<Scalar>::Real RealScalar;
+  typedef Matrix<RealScalar, 1, Dynamic> RealRowVectorType;
+  typedef Matrix<RealScalar, Dynamic, 1> RealColVectorType;
+
+  const Index rows = 7;
+  const Index cols = 11;
+  ColMajorMatrix mc = ColMajorMatrix::Random(rows, cols);
+  RowMajorMatrix mr = mc;  // same data, different storage
+
+  // Partial reductions should give the same result regardless of storage order.
+  VERIFY_IS_APPROX(mc.colwise().sum(), mr.colwise().sum());
+  VERIFY_IS_APPROX(mc.rowwise().sum(), mr.rowwise().sum());
+  VERIFY_IS_APPROX(mc.colwise().prod(), mr.colwise().prod());
+  VERIFY_IS_APPROX(mc.rowwise().prod(), mr.rowwise().prod());
+  VERIFY_IS_APPROX(mc.colwise().squaredNorm(), mr.colwise().squaredNorm());
+  VERIFY_IS_APPROX(mc.rowwise().squaredNorm(), mr.rowwise().squaredNorm());
+  VERIFY_IS_APPROX(mc.colwise().norm(), mr.colwise().norm());
+  VERIFY_IS_APPROX(mc.rowwise().norm(), mr.rowwise().norm());
+
+  RealRowVectorType rr_c, rr_r;
+  RealColVectorType rc_c, rc_r;
+  rr_c = mc.real().colwise().minCoeff();
+  rr_r = mr.real().colwise().minCoeff();
+  VERIFY_IS_APPROX(rr_c, rr_r);
+  rr_c = mc.real().colwise().maxCoeff();
+  rr_r = mr.real().colwise().maxCoeff();
+  VERIFY_IS_APPROX(rr_c, rr_r);
+  rc_c = mc.real().rowwise().minCoeff();
+  rc_r = mr.real().rowwise().minCoeff();
+  VERIFY_IS_APPROX(rc_c, rc_r);
+  rc_c = mc.real().rowwise().maxCoeff();
+  rc_r = mr.real().rowwise().maxCoeff();
+  VERIFY_IS_APPROX(rc_c, rc_r);
+
+  // Broadcast operations
+  typedef Matrix<Scalar, Dynamic, 1> ColVectorType;
+  typedef Matrix<Scalar, 1, Dynamic> RowVectorType;
+  ColVectorType cv = ColVectorType::Random(rows);
+  RowVectorType rv = RowVectorType::Random(cols);
+
+  VERIFY_IS_APPROX(ColMajorMatrix(mc.colwise() + cv), ColMajorMatrix(mr.colwise() + cv));
+  VERIFY_IS_APPROX(ColMajorMatrix(mc.rowwise() + rv), ColMajorMatrix(mr.rowwise() + rv));
+  VERIFY_IS_APPROX(ColMajorMatrix(mc.colwise() - cv), ColMajorMatrix(mr.colwise() - cv));
+  VERIFY_IS_APPROX(ColMajorMatrix(mc.rowwise() - rv), ColMajorMatrix(mr.rowwise() - rv));
+}
+
 EIGEN_DECLARE_TEST(vectorwiseop) {
   CALL_SUBTEST_1(vectorwiseop_array(Array22cd()));
   CALL_SUBTEST_2(vectorwiseop_array(Array<double, 3, 2>()));
@@ -336,4 +388,9 @@ EIGEN_DECLARE_TEST(vectorwiseop) {
   CALL_SUBTEST_8(vectorwiseop_mixedscalar());
   CALL_SUBTEST_9(vectorwiseop_array_integer(
       ArrayXXi(internal::random<int>(1, EIGEN_TEST_MAX_SIZE), internal::random<int>(1, EIGEN_TEST_MAX_SIZE))));
+
+  // RowMajor partial reductions (deterministic, outside g_repeat).
+  CALL_SUBTEST_10(vectorwiseop_rowmajor<float>());
+  CALL_SUBTEST_10(vectorwiseop_rowmajor<double>());
+  CALL_SUBTEST_10(vectorwiseop_rowmajor<std::complex<double> >());
 }