diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h index 99e5c25c2..dd208e3eb 100644 --- a/Eigen/src/Cholesky/LDLT.h +++ b/Eigen/src/Cholesky/LDLT.h @@ -225,7 +225,7 @@ class LDLT : public SolverBase > { /** \returns the internal LDLT decomposition matrix * - * TODO: document the storage layout + * TODO: document the storage layout. */ inline const MatrixType& matrixLDLT() const { eigen_assert(m_isInitialized && "LDLT is not initialized."); diff --git a/Eigen/src/Core/ArrayBase.h b/Eigen/src/Core/ArrayBase.h index 8465f54fe..675b0bb51 100644 --- a/Eigen/src/Core/ArrayBase.h +++ b/Eigen/src/Core/ArrayBase.h @@ -178,9 +178,6 @@ class ArrayBase : public DenseBase { return MatrixWrapper(derived()); } - // template - // inline void evalTo(Dest& dst) const { dst = matrix(); } - protected: EIGEN_DEFAULT_COPY_CONSTRUCTOR(ArrayBase) EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(ArrayBase) diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h index e931612d2..17e21c436 100644 --- a/Eigen/src/Core/AssignEvaluator.h +++ b/Eigen/src/Core/AssignEvaluator.h @@ -63,7 +63,7 @@ struct copy_using_evaluator_traits { static constexpr int RestrictedLinearSize = min_size_prefer_fixed(MaxSizeAtCompileTime, MaxPacketSize); static constexpr int OuterStride = outer_stride_at_compile_time::ret; - // TODO distinguish between linear traversal and inner-traversals + // TODO: distinguish between linear traversal and inner-traversal packet types. using LinearPacketType = typename find_best_packet::type; using InnerPacketType = typename find_best_packet::type; @@ -1016,7 +1016,7 @@ struct Assignment diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h index e3051a796..f225b6fff 100644 --- a/Eigen/src/Core/CwiseBinaryOp.h +++ b/Eigen/src/Core/CwiseBinaryOp.h @@ -98,7 +98,7 @@ class CwiseBinaryOp : public CwiseBinaryOpImpl RhsNested_; #if EIGEN_COMP_MSVC - // Required for Visual Studio or the Copy constructor will probably not get inlined! + // Required for Visual Studio, which may fail to inline the copy constructor otherwise. EIGEN_STRONG_INLINE CwiseBinaryOp(const CwiseBinaryOp&) = default; #endif diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h index dcadcbfb5..994e485eb 100644 --- a/Eigen/src/Core/DenseBase.h +++ b/Eigen/src/Core/DenseBase.h @@ -431,8 +431,7 @@ class DenseBase // By default, the fastest version with undefined NaN propagation semantics is // used. - // TODO(rmlarsen): Replace with default template argument when we move to - // c++11 or beyond. + // TODO(rmlarsen): Replace with default template argument (C++14 is now the minimum standard). EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff() const { return minCoeff(); } @@ -449,7 +448,7 @@ class DenseBase template EIGEN_DEVICE_FUNC typename internal::traits::Scalar maxCoeff(IndexType* index) const; - // TODO(rmlarsen): Replace these methods with a default template argument. + // TODO(rmlarsen): Replace these methods with a default template argument (C++14 is now the minimum standard). 
template EIGEN_DEVICE_FUNC inline typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const { return minCoeff(row, col); @@ -580,12 +579,12 @@ class DenseBase #else typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit, internal::pointer_based_stl_iterator, - internal::generic_randaccess_stl_iterator > + internal::generic_randaccess_stl_iterator> iterator_type; typedef std::conditional_t<(Flags & DirectAccessBit) == DirectAccessBit, internal::pointer_based_stl_iterator, - internal::generic_randaccess_stl_iterator > + internal::generic_randaccess_stl_iterator> const_iterator_type; // Stl-style iterators are supported only for vectors. diff --git a/Eigen/src/Core/GeneralProduct.h b/Eigen/src/Core/GeneralProduct.h index a9da77071..279fd82a5 100644 --- a/Eigen/src/Core/GeneralProduct.h +++ b/Eigen/src/Core/GeneralProduct.h @@ -89,7 +89,7 @@ struct product_type { /* The following allows to select the kind of product at compile time * based on the three dimensions of the product. * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ -// FIXME I'm not sure the current mapping is the ideal one. +// FIXME: the current compile-time product-type mapping may not be optimal. template struct product_type_selector { enum { ret = OuterProduct }; @@ -193,12 +193,11 @@ struct product_type_selector { * Implementation of Inner Vector Vector Product ***********************************************************************/ -// FIXME : maybe the "inner product" could return a Scalar -// instead of a 1x1 matrix ?? -// Pro: more natural for the user -// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix -// product ends up to a row-vector times col-vector product... To tackle this use -// case, we could have a specialization for Block with: operator=(Scalar x); +// FIXME: consider returning a Scalar instead of a 1x1 matrix for inner products. +// Pro: more natural for the user. +// Con: in a meta-unrolled algorithm a matrix-matrix product may reduce to a +// row-vector times column-vector product. To handle this, we could specialize +// Block with operator=(Scalar x). /*********************************************************************** * Implementation of Outer Vector Vector Product diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h index 7d605d841..10c798aaf 100644 --- a/Eigen/src/Core/GenericPacketMath.h +++ b/Eigen/src/Core/GenericPacketMath.h @@ -1329,9 +1329,7 @@ EIGEN_DEVICE_FUNC inline typename unpacket_traits::type predux_max(const /** \internal \returns true if all coeffs of \a a means "true" * It is supposed to be called on values returned by pcmp_*. */ -// not needed yet -// template EIGEN_DEVICE_FUNC inline bool predux_all(const Packet& a) -// { return bool(a); } +// TODO: implement predux_all when needed. /** \internal \returns true if any coeffs of \a a means "true" * It is supposed to be called on values returned by pcmp_*. diff --git a/Eigen/src/Core/MathFunctions.h b/Eigen/src/Core/MathFunctions.h index 4bc5b0faa..b810f4083 100644 --- a/Eigen/src/Core/MathFunctions.h +++ b/Eigen/src/Core/MathFunctions.h @@ -11,7 +11,7 @@ #ifndef EIGEN_MATHFUNCTIONS_H #define EIGEN_MATHFUNCTIONS_H -// TODO this should better be moved to NumTraits +// TODO: consider moving these constants to NumTraits. 
// Source: WolframAlpha #define EIGEN_PI 3.141592653589793238462643383279502884197169399375105820974944592307816406L #define EIGEN_LOG2E 1.442695040888963407359924681001892137426645954152985934135449406931109219L @@ -390,7 +390,7 @@ struct cast_impl EIGEN_DEVICE_FUNC inline NewType cast(const OldType& x) { @@ -832,8 +832,8 @@ EIGEN_DEVICE_FUNC std::enable_if_t<(std::numeric_limits::has_infinity && !Num template EIGEN_DEVICE_FUNC -std::enable_if_t::has_quiet_NaN || std::numeric_limits::has_signaling_NaN), bool> -isnan_impl(const T&) { + std::enable_if_t::has_quiet_NaN || std::numeric_limits::has_signaling_NaN), bool> + isnan_impl(const T&) { return false; } @@ -1450,9 +1450,9 @@ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE } template -EIGEN_DEVICE_FUNC -EIGEN_ALWAYS_INLINE std::enable_if_t::IsSigned || NumTraits::IsComplex), typename NumTraits::Real> -abs(const T& x) { +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + std::enable_if_t::IsSigned || NumTraits::IsComplex), typename NumTraits::Real> + abs(const T& x) { return x; } diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h index c703ec392..e96256a2c 100644 --- a/Eigen/src/Core/ProductEvaluators.h +++ b/Eigen/src/Core/ProductEvaluators.h @@ -182,7 +182,7 @@ struct Assignment" expression to save one temporary -// FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct +// FIXME: consider enabling these rules for all product types, not only Dense and DefaultProduct. template struct evaluator_assume_aliasing< @@ -1158,7 +1158,7 @@ struct generic_product_impl, MatrixShape, PermutationShape, Pr * Products with transpositions matrices ***************************************************************************/ -// FIXME could we unify Transpositions and Permutation into a single "shape"?? +// FIXME: consider unifying Transpositions and Permutation into a single shape. /** \internal * \class transposition_matrix_product diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h index 30ec277d0..a2dc50489 100644 --- a/Eigen/src/Core/Ref.h +++ b/Eigen/src/Core/Ref.h @@ -43,7 +43,7 @@ struct traits > OuterStrideMatch = IsVectorAtCompileTime || int(OuterStrideAtCompileTime) == int(Dynamic) || int(OuterStrideAtCompileTime) == int(Derived::OuterStrideAtCompileTime), // NOTE, this indirection of evaluator::Alignment is needed - // to workaround a very strange bug in MSVC related to the instantiation + // to work around an MSVC bug related to the instantiation // of has_*ary_operator in evaluator. // This line is surprisingly very sensitive. For instance, simply adding parenthesis // as "DerivedAlignment = (int(evaluator::Alignment))," will make MSVC fail... diff --git a/Eigen/src/Core/StableNorm.h b/Eigen/src/Core/StableNorm.h index 711ee3fb4..11abb77b0 100644 --- a/Eigen/src/Core/StableNorm.h +++ b/Eigen/src/Core/StableNorm.h @@ -40,8 +40,7 @@ inline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& sc scale = maxCoeff; } - // TODO if the maxCoeff is much much smaller than the current scale, - // then we can neglect this sub vector + // TODO: skip sub-vector when maxCoeff << current scale. 
if (scale > Scalar(0)) // if scale==0, then bl is 0 ssq += (bl * invScale).squaredNorm(); } diff --git a/Eigen/src/Core/arch/AVX512/PacketMath.h b/Eigen/src/Core/arch/AVX512/PacketMath.h index bfcab5c8d..22271e1a8 100644 --- a/Eigen/src/Core/arch/AVX512/PacketMath.h +++ b/Eigen/src/Core/arch/AVX512/PacketMath.h @@ -1407,12 +1407,12 @@ EIGEN_STRONG_INLINE Packet8l preverse(const Packet8l& a) { template <> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a) { - // _mm512_abs_ps intrinsic not found, so hack around it + // _mm512_abs_ps intrinsic not found, so implement it by masking off the sign bit with a bitwise AND. return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff))); } template <> EIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) { - // _mm512_abs_ps intrinsic not found, so hack around it + // _mm512_abs_pd intrinsic not found, so implement it by masking off the sign bit with a bitwise AND. return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a), _mm512_set1_epi64(0x7fffffffffffffff))); } template <> diff --git a/Eigen/src/Core/arch/AVX512/Reductions.h b/Eigen/src/Core/arch/AVX512/Reductions.h index f7b4c25a1..f59b78e27 100644 --- a/Eigen/src/Core/arch/AVX512/Reductions.h +++ b/Eigen/src/Core/arch/AVX512/Reductions.h @@ -55,7 +55,7 @@ EIGEN_STRONG_INLINE int64_t predux(const Packet8l& a) { // MSVC's _mm512_reduce_mul_epi64 is borked, at least up to and including 1939. // alignas(64) int64_t data[] = { 1,1,-1,-1,1,-1,-1,-1 }; // int64_t out = _mm512_reduce_mul_epi64(_mm512_load_epi64(data)); -// produces garbage: 4294967295. It seems to happen whenever the output is supposed to be negative. +// produces garbage: 4294967295. This occurs when the result should be negative. // Fall back to a manual approach: template <> EIGEN_STRONG_INLINE int64_t predux_mul(const Packet8l& a) { diff --git a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h index 297df8367..7fdf961d2 100644 --- a/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h +++ b/Eigen/src/Core/arch/Default/GenericPacketMathFunctions.h @@ -294,7 +294,7 @@ EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog_impl_double(cons Packet x2 = pmul(x, x); Packet x3 = pmul(x2, x); - // Evaluate the polynomial approximant , probably to improve instruction-level parallelism. + // Evaluate the polynomial in factored form for better instruction-level parallelism. // y = x - 0.5*x^2 + x^3 * polevl( x, P, 5 ) / p1evl( x, Q, 5 ) ); Packet y, y1, y_; y = pmadd(cst_cephes_log_p0, x, cst_cephes_log_p1); @@ -1861,8 +1861,8 @@ struct accurate_log2 { // This function implements the non-trivial case of pow(x,y) where x is // positive and y is (possibly) non-integer. // Formally, pow(x,y) = exp2(y * log2(x)), where exp2(x) is shorthand for 2^x. -// TODO(rmlarsen): We should probably add this as a packet up 'ppow', to make it -// easier to specialize or turn off for specific types and/or backends.x +// TODO(rmlarsen): We should probably add this as a packet op 'ppow', to make it +// easier to specialize or turn off for specific types and/or backends.
template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet generic_pow_impl(const Packet& x, const Packet& y) { typedef typename unpacket_traits::type Scalar; diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h index 18cd96016..da6b04500 100644 --- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h +++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h @@ -249,8 +249,8 @@ void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index n // Here, nc is chosen such that a block of kc x nc of the rhs fit within half of L2. // The second half is implicitly reserved to access the result and lhs coefficients. - // When k, RealScalar, ConjLhs_, false, Arch, P } EIGEN_STRONG_INLINE void loadRhsQuad_impl(const RhsScalar* b, RhsPacket& dest, const true_type&) const { - // FIXME we can do better! - // what we want here is a ploadheight + // FIXME: replace with a dedicated ploadheight operation for more efficient quad loading. RhsScalar tmp[4] = {b[0], b[0], b[1], b[1]}; dest = ploadquad(tmp); } @@ -669,7 +668,7 @@ DoublePacket::half> predux_half( const DoublePacket& a, std::enable_if_t::size >= 16 && !NumTraits::type>::IsComplex>* = 0) { - // yes, that's pretty hackish :( + // Workaround: reduce real packets to half size by reinterpreting as complex. DoublePacket::half> res; typedef std::complex::type> Cplx; typedef typename packet_traits::type CplxPacket; @@ -689,7 +688,7 @@ void loadQuadToDoublePacket(const Scalar* b, DoublePacket& dest, template void loadQuadToDoublePacket(const Scalar* b, DoublePacket& dest, std::enable_if_t::size == 16>* = 0) { - // yes, that's pretty hackish too :( + // Workaround: load quad elements by reinterpreting real packets as complex. typedef typename NumTraits::Real RealScalar; RealScalar r[4] = {numext::real(b[0]), numext::real(b[0]), numext::real(b[1]), numext::real(b[1])}; RealScalar i[4] = {numext::imag(b[0]), numext::imag(b[0]), numext::imag(b[1]), numext::imag(b[1])}; diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h index ebfac0146..2b45dd25b 100644 --- a/Eigen/src/Core/products/GeneralMatrixMatrix.h +++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h @@ -383,8 +383,8 @@ struct generic_product_impl // to determine the following heuristic. // EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h, // unless it has been specialized by the user or for a given architecture. - // Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs. - // I'm not sure it is still required. + // Note that the condition rhs.rows()>0 was required because lazy product did not handle empty inputs + // correctly. It is unclear whether this guard is still necessary. if ((rhs.rows() + dst.rows() + dst.cols()) < EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows() > 0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op()); else { diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h index b1b89ef97..f4625e27f 100644 --- a/Eigen/src/Core/products/Parallelizer.h +++ b/Eigen/src/Core/products/Parallelizer.h @@ -182,7 +182,7 @@ EIGEN_STRONG_INLINE void parallelize_gemm(const Functor& func, Index rows, Index // compute the maximal number of threads from the total amount of work: double work = static_cast(rows) * static_cast(cols) * static_cast(depth); - double kMinTaskSize = 50000; // FIXME improve this heuristic. 
+ double kMinTaskSize = 50000; // FIXME: tune this minimum task-size heuristic based on architecture and scalar type. pb_max_threads = std::max(1, std::min(pb_max_threads, static_cast(work / kMinTaskSize))); // compute the number of threads we are going to use diff --git a/Eigen/src/Core/products/TriangularMatrixVector.h b/Eigen/src/Core/products/TriangularMatrixVector.h index bef4cbaf8..535f48136 100644 --- a/Eigen/src/Core/products/TriangularMatrixVector.h +++ b/Eigen/src/Core/products/TriangularMatrixVector.h @@ -212,7 +212,7 @@ struct trmv_selector { ResScalar actualAlpha = alpha * lhs_alpha * rhs_alpha; // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 - // on, the other hand it is good for the cache to pack the vector anyways... + // On the other hand, it is good for the cache to pack the vector anyway... constexpr bool EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime == 1; constexpr bool ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex); constexpr bool MightCannotUseDest = (Dest::InnerStrideAtCompileTime != 1) || ComplexByReal; diff --git a/Eigen/src/Core/util/ConfigureVectorization.h b/Eigen/src/Core/util/ConfigureVectorization.h index f86d7a322..095735d4f 100644 --- a/Eigen/src/Core/util/ConfigureVectorization.h +++ b/Eigen/src/Core/util/ConfigureVectorization.h @@ -357,7 +357,7 @@ // notice that since these are C headers, the extern "C" is theoretically needed anyways. extern "C" { // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly. -// Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus: +// Doing so triggers some issues with ICC. However, old gcc versions may not have this file, thus: #if EIGEN_COMP_ICC >= 1110 || EIGEN_COMP_EMSCRIPTEN #include #else @@ -388,7 +388,7 @@ extern "C" { #define EIGEN_VECTORIZE_VSX 1 #define EIGEN_VECTORIZE_FMA #include -// We need to #undef all these ugly tokens defined in +// We need to #undef macros defined by the Altivec headers that conflict with standard C++ names. // => use __vector instead of vector #undef bool #undef vector @@ -400,7 +400,7 @@ extern "C" { #define EIGEN_VECTORIZE_ALTIVEC #define EIGEN_VECTORIZE_FMA #include -// We need to #undef all these ugly tokens defined in +// We need to #undef macros defined by the Altivec headers that conflict with standard C++ names. // => use __vector instead of vector #undef bool #undef vector diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h index 7aa369d90..dff613252 100644 --- a/Eigen/src/Core/util/Macros.h +++ b/Eigen/src/Core/util/Macros.h @@ -1139,7 +1139,7 @@ EIGEN_DEVICE_FUNC constexpr void ignore_unused_variable(const T&) {} #if EIGEN_COMP_MSVC // NOTE MSVC often gives C4127 warnings with compiletime if statements. See bug 1362. -// This workaround is ugly, but it does the job. +// This workaround suppresses MSVC C4127 warnings for compile-time conditionals. #define EIGEN_CONST_CONDITIONAL(cond) (void)0, cond #else #define EIGEN_CONST_CONDITIONAL(cond) cond diff --git a/Eigen/src/Core/util/MoreMeta.h b/Eigen/src/Core/util/MoreMeta.h index bf88d38ce..a20506e34 100644 --- a/Eigen/src/Core/util/MoreMeta.h +++ b/Eigen/src/Core/util/MoreMeta.h @@ -435,9 +435,8 @@ struct greater_equal_zero_op { /* reductions for lists */ -// using auto -> return value spec makes ICC 13.0 and 13.1 crash here, so we have to hack it -// together in front... (13.0 doesn't work with array_prod/array_reduce/... anyway, but 13.1 -// does...
+// Using auto -> return value spec makes ICC 13.0 and 13.1 crash here, +// so the return type is specified explicitly using decltype. template EIGEN_DEVICE_FUNC constexpr decltype(reduce::run((*((Ts*)0))...)) arg_prod(Ts... ts) { return reduce::run(ts...); diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h index 086977c42..b9193a2f6 100644 --- a/Eigen/src/Eigenvalues/ComplexSchur.h +++ b/Eigen/src/Eigenvalues/ComplexSchur.h @@ -277,8 +277,8 @@ typename ComplexSchur::ComplexScalar ComplexSchur::compu using std::abs; if ((iter == 10 || iter == 20) && iu > 1) { // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f - return ComplexSchur::ComplexScalar( - abs(numext::real(m_matT.coeff(iu, iu - 1))) + abs(numext::real(m_matT.coeff(iu - 1, iu - 2)))); + return ComplexSchur::ComplexScalar(abs(numext::real(m_matT.coeff(iu, iu - 1))) + + abs(numext::real(m_matT.coeff(iu - 1, iu - 2)))); } // compute the shift as one of the eigenvalues of t, the 2x2 @@ -363,7 +363,7 @@ struct complex_schur_reduce_to_hessenberg { _this.m_hess.compute(matrix); _this.m_matT = _this.m_hess.matrixH().template cast(); if (computeU) { - // This may cause an allocation which seems to be avoidable + // TODO: this temporary allocation could potentially be avoided. MatrixType Q = _this.m_hess.matrixQ(); _this.m_matU = Q.template cast(); } diff --git a/Eigen/src/Eigenvalues/HessenbergDecomposition.h b/Eigen/src/Eigenvalues/HessenbergDecomposition.h index f79ee331a..42fe3fd6e 100644 --- a/Eigen/src/Eigenvalues/HessenbergDecomposition.h +++ b/Eigen/src/Eigenvalues/HessenbergDecomposition.h @@ -317,7 +317,7 @@ namespace internal { * HessenbergDecomposition class until the it is assigned or evaluated for * some other reason (the reference should remain valid during the life time * of this object). This class is the return type of - * HessenbergDecomposition::matrixH(); there is probably no other use for this + * HessenbergDecomposition::matrixH(); there is no other intended use for this * class. */ template diff --git a/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h b/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h index 62227bdc1..38f46f815 100644 --- a/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h +++ b/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h @@ -111,8 +111,7 @@ template inline typename MatrixBase::RealScalar MatrixBase::operatorNorm() const { using std::sqrt; typename Derived::PlainObject m_eval(derived()); - // FIXME if it is really guaranteed that the eigenvalues are already sorted, - // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough. + // FIXME: if eigenvalues are guaranteed to be sorted, comparing the first and last is sufficient. return sqrt((m_eval * m_eval.adjoint()).eval().template selfadjointView().eigenvalues().maxCoeff()); } diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h index 94bc34dd8..fda3b2eb7 100644 --- a/Eigen/src/Eigenvalues/RealSchur.h +++ b/Eigen/src/Eigenvalues/RealSchur.h @@ -343,7 +343,7 @@ RealSchur& RealSchur::computeFromHessenberg(const HessMa template inline typename MatrixType::Scalar RealSchur::computeNormOfT() { const Index size = m_matT.cols(); - // FIXME to be efficient the following would requires a triangular reduxion code + // FIXME: a triangular reduction would be more efficient here. 
// Scalar norm = m_matT.upper().cwiseAbs().sum() // + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum(); Scalar norm(0); diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h index 161e6b572..fb37ff558 100644 --- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h +++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h @@ -548,8 +548,7 @@ EIGEN_DEVICE_FUNC ComputationInfo computeFromTridiagonal_impl(DiagType& diag, Su info = NoConvergence; // Sort eigenvalues and corresponding vectors. - // TODO make the sort optional ? - // TODO use a better sort algorithm !! + // TODO: make the sort optional and use a more efficient sorting algorithm. if (info == Success) { for (Index i = 0; i < n - 1; ++i) { Index k; @@ -653,12 +652,12 @@ struct direct_selfadjoint_eigenvalues { // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow. Scalar shift = mat.trace() / Scalar(3); - // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for - // computing the eigenvectors later + // TODO: avoid this copy. Currently necessary to suppress bogus values when determining maxCoeff and for + // computing the eigenvectors later. MatrixType scaledMat = mat.template selfadjointView(); scaledMat.diagonal().array() -= shift; Scalar scale = scaledMat.cwiseAbs().maxCoeff(); - if (scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations + if (scale > 0) scaledMat /= scale; // TODO: skip remaining operations when scale==0. // compute the eigenvalues computeRoots(scaledMat, eivals); diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h index 4159dc6d0..b304b6743 100644 --- a/Eigen/src/Geometry/Homogeneous.h +++ b/Eigen/src/Geometry/Homogeneous.h @@ -247,7 +247,7 @@ struct homogeneous_left_product_impl, Lhs> template EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const { - // FIXME investigate how to allow lazy evaluation of this product when possible + // FIXME: investigate how to allow lazy evaluation of this product when possible. dst = Block < const LhsMatrixTypeNested, LhsMatrixTypeNested::RowsAtCompileTime, LhsMatrixTypeNested::ColsAtCompileTime == Dynamic ? Dynamic @@ -278,7 +278,7 @@ struct homogeneous_right_product_impl, Rhs> template EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const { - // FIXME investigate how to allow lazy evaluation of this product when possible + // FIXME: investigate how to allow lazy evaluation of this product when possible. dst = m_lhs * Block < const RhsNested, RhsNested::RowsAtCompileTime == Dynamic ? Dynamic : RhsNested::RowsAtCompileTime - 1, RhsNested::ColsAtCompileTime > (m_rhs, 0, 0, m_rhs.rows() - 1, m_rhs.cols()); @@ -392,8 +392,7 @@ struct generic_product_impl, DenseShape, Homo } }; -// TODO: the following specialization is to address a regression from 3.2 to 3.3 -// In the future, this path should be optimized. +// TODO: this specialization addresses a performance regression from 3.2 to 3.3; optimize this path. template struct generic_product_impl, TriangularShape, HomogeneousShape, ProductTag> { template diff --git a/Eigen/src/Geometry/Hyperplane.h b/Eigen/src/Geometry/Hyperplane.h index 0fa0319a0..1bdb811ea 100644 --- a/Eigen/src/Geometry/Hyperplane.h +++ b/Eigen/src/Geometry/Hyperplane.h @@ -111,7 +111,7 @@ class Hyperplane { * If the dimension of the ambient space is greater than 2, then there isn't uniqueness, * so an arbitrary choice is made. 
*/ - // FIXME to be consistent with the rest this could be implemented as a static Through function ?? + // FIXME: for consistency, consider implementing as a static Through function. EIGEN_DEVICE_FUNC explicit Hyperplane(const ParametrizedLine& parametrized) { normal() = parametrized.direction().unitOrthogonal(); offset() = -parametrized.origin().dot(normal()); diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h index b2ccfc6c8..8b71cd8f1 100644 --- a/Eigen/src/Geometry/Quaternion.h +++ b/Eigen/src/Geometry/Quaternion.h @@ -790,7 +790,7 @@ EIGEN_DEVICE_FUNC Quaternion Quaternion::FromT template EIGEN_DEVICE_FUNC inline Quaternion::Scalar> QuaternionBase::inverse() const { - // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ?? + // FIXME: consider renaming to multiplicativeInverse() and renaming conjugate() to inverse() or opposite(). Scalar n2 = this->squaredNorm(); if (n2 > Scalar(0)) return Quaternion(conjugate().coeffs() / n2); diff --git a/Eigen/src/Geometry/Scaling.h b/Eigen/src/Geometry/Scaling.h index a0604cee1..5bb7c45d9 100644 --- a/Eigen/src/Geometry/Scaling.h +++ b/Eigen/src/Geometry/Scaling.h @@ -80,7 +80,7 @@ class UniformScaling { } /** Concatenates a uniform scaling and a linear transformation matrix */ - // TODO returns an expression + // TODO: return an expression instead of a dense matrix. template inline typename Eigen::internal::plain_matrix_type::type operator*(const MatrixBase& other) const { return other * m_factor; diff --git a/Eigen/src/Householder/BlockHouseholder.h b/Eigen/src/Householder/BlockHouseholder.h index e730b94fe..6a4bc675a 100644 --- a/Eigen/src/Householder/BlockHouseholder.h +++ b/Eigen/src/Householder/BlockHouseholder.h @@ -36,7 +36,7 @@ void make_block_householder_triangular_factor(TriangularFactorType& triFactor, c triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint() * vectors.bottomRightCorner(rs, rt).template triangularView(); - // FIXME use the following line with .noalias() once the triangular product can work inplace + // FIXME: use the following line with .noalias() once triangular product supports in-place operation. // triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template // triangularView(); for (Index j = nbVecs - 1; j > i; --j) { @@ -71,7 +71,7 @@ void apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vec (VectorsType::MaxColsAtCompileTime == 1 && MatrixType::MaxColsAtCompileTime != 1) ? RowMajor : ColMajor, VectorsType::MaxColsAtCompileTime, MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat; - // FIXME add .noalias() once the triangular product can work inplace + // FIXME: add .noalias() once triangular product supports in-place operation. if (forward) tmp = T.template triangularView() * tmp; else diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h b/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h index dd40058ab..a426381e7 100644 --- a/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h +++ b/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h @@ -264,7 +264,7 @@ void IncompleteCholesky::factorize(const MatrixType else m_scale(j) = 1; - // TODO disable scaling if not needed, i.e., if it is roughly uniform? (this will make solve() faster) + // TODO: disable scaling when roughly uniform to speed up solve(). 
// Scale and compute the shift for the matrix RealScalar mindiag = NumTraits::highest(); diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h index 11ce5e5aa..3a84e55ae 100644 --- a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h +++ b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h @@ -244,8 +244,8 @@ void IncompleteLUT::analyzePattern(const MatrixType_& amat // To this end, let's symmetrize the pattern and perform AMD on it. SparseMatrix mat1 = amat; SparseMatrix mat2 = amat.transpose(); - // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. - // on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred... + // FIXME: for a nearly symmetric pattern, mat2+mat1 is appropriate; + // for a highly non-symmetric pattern, mat2*mat1 should be preferred. SparseMatrix AtA = mat2 + mat1; AMDOrdering ordering; ordering(AtA, m_P); diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h index 7723b64c9..50a0e2bf8 100644 --- a/Eigen/src/LU/FullPivLU.h +++ b/Eigen/src/LU/FullPivLU.h @@ -571,7 +571,7 @@ MatrixType FullPivLU::reconstructedMatrix() const const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols()); // LU MatrixType res(m_lu.rows(), m_lu.cols()); - // FIXME the .toDenseMatrix() should not be needed... + // FIXME: the .toDenseMatrix() calls should not be needed. res = m_lu.leftCols(smalldim).template triangularView().toDenseMatrix() * m_lu.topRows(smalldim).template triangularView().toDenseMatrix(); @@ -632,10 +632,10 @@ struct kernel_retval > if (abs(dec().matrixLU().coeff(i, i)) > premultiplied_threshold) pivots.coeffRef(p++) = i; eigen_internal_assert(p == rank()); - // we construct a temporaty trapezoid matrix m, by taking the U matrix and - // permuting the rows and cols to bring the nonnegligible pivots to the top of - // the main diagonal. We need that to be able to apply our triangular solvers. - // FIXME when we get triangularView-for-rectangular-matrices, this can be simplified + // Construct a temporary trapezoid matrix m by taking the U matrix and permuting + // the rows and cols to bring the nonnegligible pivots to the top of the main diagonal. + // This is needed to apply our triangular solvers. + // FIXME: simplify once triangularView supports rectangular matrices. Matrix::Options, MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime> m(dec().matrixLU().block(0, 0, rank(), cols)); diff --git a/Eigen/src/OrderingMethods/Ordering.h b/Eigen/src/OrderingMethods/Ordering.h index 1a6500776..282d09f7d 100644 --- a/Eigen/src/OrderingMethods/Ordering.h +++ b/Eigen/src/OrderingMethods/Ordering.h @@ -22,7 +22,7 @@ namespace internal { * \ingroup OrderingMethods_Module * \param[in] A the input non-symmetric matrix * \param[out] symmat the symmetric pattern A^T+A from the input matrix \a A. - * FIXME: The values should not be considered here + * FIXME: only the sparsity pattern should be used here; values should be ignored. */ template void ordering_helper_at_plus_a(const MatrixType& A, MatrixType& symmat) { diff --git a/Eigen/src/PardisoSupport/PardisoSupport.h b/Eigen/src/PardisoSupport/PardisoSupport.h index 7091eb7fd..90df3cdcd 100644 --- a/Eigen/src/PardisoSupport/PardisoSupport.h +++ b/Eigen/src/PardisoSupport/PardisoSupport.h @@ -185,7 +185,7 @@ class PardisoImpl : public SparseSolverBase { bool symmetric = std::abs(m_type) < 10; m_iparm[0] = 1; // No solver default m_iparm[1] = 2; // use Metis for the ordering - m_iparm[2] = 0; // Reserved. 
Set to zero. (??Numbers of processors, value of OMP_NUM_THREADS??) + m_iparm[2] = 0; // Reserved. Set to zero. (Was number of processors / OMP_NUM_THREADS.) m_iparm[3] = 0; // No iterative-direct algorithm m_iparm[4] = 0; // No user fill-in reducing permutation m_iparm[5] = 0; // Write solution into x, b is left unchanged diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h index be72c6f12..b91792d73 100644 --- a/Eigen/src/QR/FullPivHouseholderQR.h +++ b/Eigen/src/QR/FullPivHouseholderQR.h @@ -575,8 +575,7 @@ template void FullPivHouseholderQR::_solve_impl(const RhsType& rhs, DstType& dst) const { const Index l_rank = rank(); - // FIXME introduce nonzeroPivots() and use it here. and more generally, - // make the same improvements in this dec as in FullPivLU. + // FIXME: introduce nonzeroPivots() and apply the same improvements as in FullPivLU. if (l_rank == 0) { dst.setZero(); return; diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h index e55fc0fca..e7173a568 100644 --- a/Eigen/src/QR/HouseholderQR.h +++ b/Eigen/src/QR/HouseholderQR.h @@ -384,7 +384,7 @@ void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename } } -// TODO: add a corresponding public API for updating a QR factorization +// TODO: expose a public API for rank-1 QR update. /** \internal * Basically a modified copy of @c Eigen::internal::householder_qr_inplace_unblocked that * performs a rank-1 update of the QR matrix in compact storage. This function assumes, that diff --git a/Eigen/src/SVD/BDCSVD.h b/Eigen/src/SVD/BDCSVD.h index 817c1e46c..43b4071e2 100644 --- a/Eigen/src/SVD/BDCSVD.h +++ b/Eigen/src/SVD/BDCSVD.h @@ -379,7 +379,7 @@ BDCSVD& BDCSVD::compute_impl(const Mat //**** step 2 - Divide & Conquer m_naiveU.setZero(); m_naiveV.setZero(); - // FIXME this line involves a temporary matrix + // FIXME: this line involves a temporary matrix. m_computed.topRows(diagSize()) = bid.bidiagonal().toDenseMatrix().transpose(); m_computed.template bottomRows<1>().setZero(); divide(0, diagSize() - 1, 0, 0, 0); @@ -429,7 +429,7 @@ void BDCSVD::copyUV(const HouseholderU& householderU, const m_matrixU = MatrixX::Identity(rows(), Ucols); m_matrixU.topLeftCorner(diagSize(), diagSize()) = naiveV.template cast().topLeftCorner(diagSize(), diagSize()); - // FIXME the following conditionals involve temporary buffers + // FIXME: the following conditionals involve temporary buffers. if (m_useQrDecomp) m_matrixU.topLeftCorner(householderU.cols(), diagSize()).applyOnTheLeft(householderU); else @@ -440,7 +440,7 @@ void BDCSVD::copyUV(const HouseholderU& householderU, const m_matrixV = MatrixX::Identity(cols(), Vcols); m_matrixV.topLeftCorner(diagSize(), diagSize()) = naiveU.template cast().topLeftCorner(diagSize(), diagSize()); - // FIXME the following conditionals involve temporary buffers + // FIXME: the following conditionals involve temporary buffers. if (m_useQrDecomp) m_matrixV.topLeftCorner(householderV.cols(), diagSize()).applyOnTheLeft(householderV); else @@ -538,7 +538,7 @@ void BDCSVD::divide(Index firstCol, Index lastCol, Index fi // We use the other algorithm which is more efficient for small // matrices. if (n < m_algoswap) { - // FIXME this block involves temporaries + // FIXME: this block involves temporaries. if (m_compV) { JacobiSVD baseSvd; computeBaseCase(baseSvd, n, firstCol, firstRowW, firstColW, shift); @@ -681,8 +681,8 @@ void BDCSVD::divide(Index firstCol, Index lastCol, Index fi // order except for possibly the (0,0) entry. 
The computed SVD is stored U, singVals and V, except // that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order. // -// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better -// handling of round-off errors, be consistent in ordering +// TODO: opportunities for optimization: better root-finding algorithm, better stopping criterion, +// better handling of round-off errors, and consistent ordering. // For instance, to solve the secular equation using FMM, see // http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf template diff --git a/Eigen/src/SVD/UpperBidiagonalization.h b/Eigen/src/SVD/UpperBidiagonalization.h index 76bd1b2e0..2cf020e51 100644 --- a/Eigen/src/SVD/UpperBidiagonalization.h +++ b/Eigen/src/SVD/UpperBidiagonalization.h @@ -17,8 +17,8 @@ namespace Eigen { namespace internal { -// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API. -// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class. +// UpperBidiagonalization may be replaced by a Bidiagonalization class; not part of stable API. +// Kept for now as it is one of the few tests exercising the BandMatrix class. template class UpperBidiagonalization { diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h index 3c6e797bd..2beadab9f 100644 --- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h +++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h @@ -79,8 +79,8 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r const Index t200 = rows / 11; // 11 == (log2(200)*1.39) const Index t = (rows * 100) / 139; - // FIXME reserve nnz non zeros - // FIXME implement faster sorting algorithms for very small nnz + // FIXME: reserve space for the expected number of non-zeros. + // FIXME: implement faster sorting for very small nnz counts. // if the result is sparse enough => use a quick sort // otherwise => loop through the entire vector // In order to avoid to perform an expensive log2 when the @@ -131,7 +131,7 @@ struct conservative_sparse_sparse_product_selector rhs.cols()) { using ColMajorMatrix = typename sparse_eval::type; diff --git a/Eigen/src/SparseCore/SparseDiagonalProduct.h b/Eigen/src/SparseCore/SparseDiagonalProduct.h index 1f72a6b3c..e02fdaa7c 100644 --- a/Eigen/src/SparseCore/SparseDiagonalProduct.h +++ b/Eigen/src/SparseCore/SparseDiagonalProduct.h @@ -39,7 +39,11 @@ struct product_evaluator, ProductTag, Diagonal : public sparse_diagonal_product_evaluator { typedef Product XprType; - enum { CoeffReadCost = HugeCost, Flags = Rhs::Flags & RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags + enum { + CoeffReadCost = HugeCost, + Flags = Rhs::Flags & RowMajorBit, + Alignment = 0 + }; // FIXME: compute proper CoeffReadCost and propagate Flags. typedef sparse_diagonal_product_evaluator @@ -52,7 +56,11 @@ struct product_evaluator, ProductTag, SparseSh : public sparse_diagonal_product_evaluator, Lhs::Flags & RowMajorBit ? SDP_AsCwiseProduct : SDP_AsScalarProduct> { typedef Product XprType; - enum { CoeffReadCost = HugeCost, Flags = Lhs::Flags & RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags + enum { + CoeffReadCost = HugeCost, + Flags = Lhs::Flags & RowMajorBit, + Alignment = 0 + }; // FIXME: compute proper CoeffReadCost and propagate Flags. 
typedef sparse_diagonal_product_evaluator, Lhs::Flags & RowMajorBit ? SDP_AsCwiseProduct : SDP_AsScalarProduct> diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h index 8fcdfdf19..782e59511 100644 --- a/Eigen/src/SparseCore/SparseMatrix.h +++ b/Eigen/src/SparseCore/SparseMatrix.h @@ -1548,7 +1548,7 @@ SparseMatrix::operator=(const SparseMatrixBase< Eigen::Map(dest.m_outerIndex, dest.outerSize()).setZero(); // pass 1 - // FIXME the above copy could be merged with that pass + // FIXME: merge the above copy into this pass to avoid iterating twice. for (Index j = 0; j < otherCopy.outerSize(); ++j) for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) ++dest.m_outerIndex[it.index()]; diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h index ccbbe98da..acdaf9366 100644 --- a/Eigen/src/SparseCore/SparseMatrixBase.h +++ b/Eigen/src/SparseCore/SparseMatrixBase.h @@ -115,7 +115,7 @@ class SparseMatrixBase : public EigenBase { typedef Transpose TransposeReturnType; typedef Transpose ConstTransposeReturnType; - // FIXME storage order do not match evaluator storage order + // FIXME: storage order may not match evaluator storage order. typedef SparseMatrix PlainObject; /** This is the "real scalar" type; if the \a Scalar type is already real numbers @@ -203,7 +203,7 @@ class SparseMatrixBase : public EigenBase { return derived(); } - SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ + SparseMatrixBase() : m_isRValue(false) { /* TODO: validate traits flags. */ } template diff --git a/Eigen/src/SparseCore/SparseRef.h b/Eigen/src/SparseCore/SparseRef.h index c205e6ddd..da026af5c 100644 --- a/Eigen/src/SparseCore/SparseRef.h +++ b/Eigen/src/SparseCore/SparseRef.h @@ -117,7 +117,8 @@ class Ref, Options, StrideType> #else template class Ref - : public SparseMapBase // yes, that's weird to use Derived here, but that works! + : public SparseMapBase // Note: 'Derived' is used here intentionally; it resolves + // correctly via CRTP. #endif { typedef SparseMatrix PlainObjectType; diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index 6e1c9cf50..84816aef4 100644 --- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -56,14 +56,11 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r res.reserve(estimated_nnz_prod); double ratioColRes = double(estimated_nnz_prod) / (double(lhs.rows()) * double(rhs.cols())); for (Index j = 0; j < cols; ++j) { - // FIXME: - // double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + - // double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows()); - // let's do a more accurate determination of the nnz ratio for the current column j of res + // FIXME: compute a more accurate per-column nnz ratio for res. tempVector.init(ratioColRes); tempVector.setZero(); for (typename evaluator::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt) { - // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index()) + // FIXME: rewrite as tmp += rhsIt.value() * lhs.col(rhsIt.index()). 
tempVector.restart(); RhsScalar x = rhsIt.value(); for (typename evaluator::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt) { diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h index 33cedaf3e..8eedd54c1 100644 --- a/Eigen/src/SparseCore/SparseUtil.h +++ b/Eigen/src/SparseCore/SparseUtil.h @@ -105,7 +105,7 @@ struct sparse_eval { typedef SparseVector type; }; -// TODO this seems almost identical to plain_matrix_type +// TODO: consider unifying with plain_matrix_type. template struct sparse_eval { typedef typename traits::Scalar Scalar_; diff --git a/Eigen/src/SparseCore/TriangularSolver.h b/Eigen/src/SparseCore/TriangularSolver.h index 684de4830..ddc555683 100644 --- a/Eigen/src/SparseCore/TriangularSolver.h +++ b/Eigen/src/SparseCore/TriangularSolver.h @@ -195,7 +195,7 @@ struct sparse_solve_triangular_sparse_selector { res.reserve(other.nonZeros()); for (Index col = 0; col < other.cols(); ++col) { - // FIXME estimate number of non zeros + // FIXME: estimate the number of non-zeros per column for better allocation. tempVector.init(.99 /*float(other.col(col).nonZeros())/float(other.rows())*/); tempVector.setZero(); tempVector.restart(); @@ -230,16 +230,11 @@ struct sparse_solve_triangular_sparse_selector { } } - // Index count = 0; - // FIXME compute a reference value to filter zeros + // FIXME: compute a reference value to filter zeros. for (typename AmbiVector::Iterator it(tempVector /*,1e-12*/); it; ++it) { - // ++ count; - // std::cerr << "fill " << it.index() << ", " << col << "\n"; - // std::cout << it.value() << " "; - // FIXME use insertBack + // FIXME: use insertBack for better performance. res.insert(it.index(), col) = it.value(); } - // std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n"; } res.finalize(); other = res.markAsRValue(); diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h index 7338c61a3..f1662c51d 100644 --- a/Eigen/src/SparseLU/SparseLU.h +++ b/Eigen/src/SparseLU/SparseLU.h @@ -51,7 +51,7 @@ class SparseLUTransposeView : public SparseSolverBaseinfo() == Success && "The matrix should be factorized first"); EIGEN_STATIC_ASSERT((Dest::Flags & RowMajorBit) == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); - // this ugly const_cast_derived() helps to detect aliasing when applying the permutations + // const_cast_derived() is needed to enable aliasing detection when applying the permutations. for (Index j = 0; j < B.cols(); ++j) { X.col(j) = m_sparseLU->colsPermutation() * B.const_cast_derived().col(j); } @@ -344,7 +344,7 @@ class SparseLU : public SparseSolverBase>, // on return, X is overwritten by the computed solution X.resize(B.rows(), B.cols()); - // this ugly const_cast_derived() helps to detect aliasing when applying the permutations + // const_cast_derived() is needed to enable aliasing detection when applying the permutations. for (Index j = 0; j < B.cols(); ++j) X.col(j) = rowsPermutation() * B.const_cast_derived().col(j); // Forward substitution with L @@ -603,7 +603,7 @@ void SparseLU::analyzePattern(const MatrixType& mat) { * > A->ncol: number of bytes allocated when memory allocation failure occurred, plus A->ncol. * If lwork = -1, it is the estimated amount of space needed, plus A->ncol. * - * It seems that A was the name of the matrix in the past. + * Note: 'A' in the above description refers to the factored matrix (historical naming from SuperLU). 
* * \sa analyzePattern(), compute(), SparseLU(), info(), lastErrorMessage() */ @@ -616,7 +616,6 @@ void SparseLU::factorize(const MatrixType& matrix) { m_isInitialized = true; // Apply the column permutation computed in analyzepattern() - // m_mat = matrix * m_perm_c.inverse(); m_mat = matrix; if (m_perm_c.size()) { m_mat.uncompress(); // NOTE: The effect of this command is only to create the InnerNonzeros pointers. @@ -779,7 +778,7 @@ void SparseLU::factorize(const MatrixType& matrix) { } // Update the determinant of the row permutation matrix - // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not + // FIXME: the following test is not correct; it should account for iperm_c, and pivrow is not // directly the row pivot. if (pivrow != jj) m_detPermR = -m_detPermR; diff --git a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h index eb1590916..98b7348c7 100644 --- a/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h +++ b/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h @@ -27,11 +27,7 @@ namespace internal { * NOTE : This class corresponds to the SCformat structure in SuperLU * */ -/* TODO - * InnerIterator as for sparsematrix - * SuperInnerIterator to iterate through all supernodes - * Function for triangular solve - */ +// TODO: add InnerIterator, SuperInnerIterator, and triangular solve support. template class MappedSuperNodalMatrix { public: diff --git a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h index 8df830b08..86080e448 100644 --- a/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h +++ b/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h @@ -52,7 +52,7 @@ void SparseLUImpl::heap_relax_snode(const Index n, IndexVe IndexVector post; internal::treePostorder(StorageIndex(n), et, post); // Post order etree IndexVector inv_post(n + 1); - for (StorageIndex i = 0; i < n + 1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()??? + for (StorageIndex i = 0; i < n + 1; ++i) inv_post(post(i)) = i; // Compute the inverse postorder permutation. // Renumber etree in postorder IndexVector iwork(n); diff --git a/Eigen/src/SparseLU/SparseLU_panel_dfs.h b/Eigen/src/SparseLU/SparseLU_panel_dfs.h index df3154845..441239681 100644 --- a/Eigen/src/SparseLU/SparseLU_panel_dfs.h +++ b/Eigen/src/SparseLU/SparseLU_panel_dfs.h @@ -136,12 +136,9 @@ void SparseLUImpl::dfs_kernel(const StorageIndex jj, Index // segment is seen for the first time. (Note that // "repfnz(krep)" may change later.) // Baktrack dfs to its parent - if (traits.update_segrep(krep, jj)) - // if (marker1(krep) < jcol ) - { + if (traits.update_segrep(krep, jj)) { segrep(nseg) = krep; ++nseg; - // marker1(krep) = jj; } kpar = parent(krep); // Pop recursion, mimic recursion diff --git a/Eigen/src/SparseQR/SparseQR.h b/Eigen/src/SparseQR/SparseQR.h index 78c35d814..d11e6649b 100644 --- a/Eigen/src/SparseQR/SparseQR.h +++ b/Eigen/src/SparseQR/SparseQR.h @@ -338,8 +338,7 @@ void SparseQR::analyzePattern(const MatrixType& mat) { m_Q.resize(m, diagSize); // Allocate space for nonzero elements: rough estimation - m_R.reserve(2 * mat.nonZeros()); // FIXME Get a more accurate estimation through symbolic factorization with the - // etree + m_R.reserve(2 * mat.nonZeros()); // FIXME: get a tighter bound via symbolic factorization using the etree. 
m_Q.reserve(2 * mat.nonZeros()); m_hcoeffs.resize(diagSize); m_analysisIsok = true; @@ -502,7 +501,7 @@ void SparseQR::factorize(const MatrixType& mat) { if (nonzeroCol < diagSize) { // Compute the Householder reflection that eliminate the current column - // FIXME this step should call the Householder module. + // FIXME: refactor to use the Householder module's reflector computation. Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0); // First, the squared norm of Q((col+1):m, col) @@ -627,7 +626,7 @@ struct SparseQR_QProduct : ReturnByValue @@ -646,14 +645,14 @@ struct SparseQRMatrixQReturnType : public EigenBase transpose() const { return SparseQRMatrixQTransposeReturnType(m_qr); } const SparseQRType& m_qr; }; -// TODO this actually represents the adjoint of Q +// TODO: rename to SparseQRMatrixQAdjointReturnType; this represents the adjoint of Q. template struct SparseQRMatrixQTransposeReturnType { explicit SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {} diff --git a/Eigen/src/SuperLUSupport/SuperLUSupport.h b/Eigen/src/SuperLUSupport/SuperLUSupport.h index 99f70cf76..7d6fef2e2 100644 --- a/Eigen/src/SuperLUSupport/SuperLUSupport.h +++ b/Eigen/src/SuperLUSupport/SuperLUSupport.h @@ -583,7 +583,7 @@ void SuperLU::factorize(const MatrixType &a) { m_extractedDataAreDirty = true; - // FIXME how to better check for errors ??? + // FIXME: implement more detailed error checking based on SuperLU info codes. m_info = info == 0 ? Success : NumericalIssue; m_factorizationIsOk = true; } @@ -872,7 +872,7 @@ void SuperILU::factorize(const MatrixType &a) { &info, Scalar()); StatFree(&m_sluStat); - // FIXME how to better check for errors ??? + // FIXME: implement more detailed error checking based on SuperLU info codes. m_info = info == 0 ? Success : NumericalIssue; m_factorizationIsOk = true; } diff --git a/bench/BenchTimer.h b/bench/BenchTimer.h index 0b8c63c1d..b3a26fbd6 100644 --- a/bench/BenchTimer.h +++ b/bench/BenchTimer.h @@ -150,9 +150,9 @@ class BenchTimer { #define BENCH(TIMER, TRIES, REP, CODE) \ { \ TIMER.reset(); \ - for (int uglyvarname1 = 0; uglyvarname1 < TRIES; ++uglyvarname1) { \ + for (int bench_tries_ = 0; bench_tries_ < TRIES; ++bench_tries_) { \ TIMER.start(); \ - for (int uglyvarname2 = 0; uglyvarname2 < REP; ++uglyvarname2) { \ + for (int bench_reps_ = 0; bench_reps_ < REP; ++bench_reps_) { \ CODE; \ } \ TIMER.stop(); \ diff --git a/test/geo_alignedbox.cpp b/test/geo_alignedbox.cpp index da49c081f..67ca27ed3 100644 --- a/test/geo_alignedbox.cpp +++ b/test/geo_alignedbox.cpp @@ -257,8 +257,8 @@ void alignedboxRotatable(const BoxType& box, // box((-3, -2, -2), (-1, 0, 0)) IsometryTransform tf2 = IsometryTransform::Identity(); - // for some weird reason the following statement has to be put separate from - // the following rotate call, otherwise precision problems arise... + // The following statement must be separate from the rotate call below, + // otherwise precision problems arise. 
Rotation rot = rotate(NonInteger(EIGEN_PI)); tf2.rotate(rot); diff --git a/test/indexed_view.cpp b/test/indexed_view.cpp index c0d140da3..180169545 100644 --- a/test/indexed_view.cpp +++ b/test/indexed_view.cpp @@ -356,7 +356,7 @@ void check_indexed_view() { VERIFY_IS_CWISE_EQUAL(R_ref(eigen_matrix_rows, eigen_matrix_cols), R_ref(c_array_rows, c_array_cols)); } - // check mat(i,j) with weird types for i and j + // check mat(i,j) with unusual types for i and j { VERIFY_IS_APPROX(A(B.RowsAtCompileTime - 1, 1), A(3, 1)); VERIFY_IS_APPROX(A(B.RowsAtCompileTime, 1), A(4, 1)); @@ -420,7 +420,7 @@ void check_indexed_view() { // check symbolic indices a(last) = 1.0; A(last, last) = 1; - // check weird non-const, non-lvalue scenarios + // check unusual non-const, non-lvalue scenarios { // in these scenarios, the objects are not declared 'const', and the compiler will attempt to use the non-const // overloads without intervention diff --git a/test/nullary.cpp b/test/nullary.cpp index 9f9cfea14..c2bbe3f4f 100644 --- a/test/nullary.cpp +++ b/test/nullary.cpp @@ -271,7 +271,7 @@ void nullary_internal_logic() { VERIFY((!internal::has_binary_operator >::value)); VERIFY((internal::functor_has_linear_access >::ret)); - // Regression unit test for a weird MSVC bug. + // Regression unit test for an MSVC bug. // Search "nullary_wrapper_workaround_msvc" in CoreEvaluators.h for the details. // See also traits::match. { diff --git a/test/svd_common.h b/test/svd_common.h index 5174adec0..82c61a579 100644 --- a/test/svd_common.h +++ b/test/svd_common.h @@ -124,7 +124,7 @@ void svd_least_square(const MatrixType& m) { if (internal::is_same::value || svd.rank() == m.diagonal().size()) { using std::sqrt; // This test is not stable with single precision. - // This is probably because squaring m signicantly affects the precision. + // This is likely because squaring m significantly affects the precision. if (internal::is_same::value) ++g_test_level; VERIFY_IS_APPROX(m.adjoint() * (m * x), m.adjoint() * rhs); diff --git a/unsupported/Eigen/src/EulerAngles/EulerSystem.h b/unsupported/Eigen/src/EulerAngles/EulerSystem.h index bf8a33456..c4b190be7 100644 --- a/unsupported/Eigen/src/EulerAngles/EulerSystem.h +++ b/unsupported/Eigen/src/EulerAngles/EulerSystem.h @@ -120,8 +120,7 @@ enum EulerAxis { template class EulerSystem { public: - // It's defined this way and not as enum, because I think - // that enum is not guerantee to support negative numbers + // Defined as static constexpr rather than enum to ensure support for negative values. /** The first rotation axis */ static constexpr int AlphaAxis = _AlphaAxis; diff --git a/unsupported/Eigen/src/FFT/fftw_impl.h b/unsupported/Eigen/src/FFT/fftw_impl.h index 0b9ad3dac..588d09e61 100644 --- a/unsupported/Eigen/src/FFT/fftw_impl.h +++ b/unsupported/Eigen/src/FFT/fftw_impl.h @@ -16,8 +16,8 @@ namespace Eigen { namespace internal { -// FFTW uses non-const arguments -// so we must use ugly const_cast calls for all the args it uses +// FFTW uses non-const arguments, +// so const_cast is needed for all the args it uses. // // This should be safe as long as // 1. 
we use FFTW_ESTIMATE for all our planning diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h index b11eb7411..34e7531b9 100644 --- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h +++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h @@ -21,8 +21,7 @@ namespace internal { // post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2) template void matrix_sqrt_quasi_triangular_2x2_diagonal_block(const MatrixType& T, Index i, ResultType& sqrtT) { - // TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere - // in EigenSolver. If we expose it, we could call it directly from here. + // TODO: this 2x2 complex-conjugate eigenvalue case could reuse logic from EigenSolver if exposed. typedef typename traits::Scalar Scalar; Matrix block = T.template block<2, 2>(i, i); EigenSolver > es(block); diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h index 14202012a..343260895 100644 --- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h +++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h @@ -67,8 +67,7 @@ void lmpar(Matrix &r, const VectorXi &ipvt, const Matr l = ipvt[j]; wa1[j] = diag[l] * (wa2[l] / dxnorm); } - // it's actually a triangularView.solveInplace(), though in a weird - // way: + // Triangular solve (forward substitution): for (j = 0; j < n; ++j) { Scalar sum = 0.; for (i = 0; i < j; ++i) sum += r(i, j) * wa1[i];