mirror of
https://gitlab.com/libeigen/eigen.git
synced 2026-04-10 11:34:33 +08:00
Compare commits
14 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3de311f497 | ||
|
|
06fd84cdb1 | ||
|
|
0e5c640563 | ||
|
|
be64619ab6 | ||
|
|
269bf67796 | ||
|
|
15ca6659ac | ||
|
|
d9e5fd393a | ||
|
|
fd7eba3394 | ||
|
|
d671205755 | ||
|
|
3958e7f751 | ||
|
|
164f410bb5 | ||
|
|
a2782964c1 | ||
|
|
ce3984844d | ||
|
|
361225068d |
@@ -1,10 +1,13 @@
|
||||
project(Eigen)
|
||||
set(EIGEN_VERSION_NUMBER "2.0-beta3")
|
||||
set(EIGEN_VERSION_NUMBER "2.0-beta4")
|
||||
|
||||
#if the svnversion program is absent, this will leave the SVN_REVISION string empty,
|
||||
#but won't stop CMake.
|
||||
execute_process(COMMAND svnversion -n ${CMAKE_SOURCE_DIR}
|
||||
OUTPUT_VARIABLE EIGEN_SVN_REVISION)
|
||||
OUTPUT_VARIABLE EIGEN_SVNVERSION_OUTPUT)
|
||||
|
||||
#we only want EIGEN_SVN_REVISION if it is an actual revision number, not a string like "exported"
|
||||
string(REGEX MATCH "^[0-9]+.*" EIGEN_SVN_REVISION "${EIGEN_SVNVERSION_OUTPUT}")
|
||||
|
||||
if(EIGEN_SVN_REVISION)
|
||||
set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER} (SVN revision ${EIGEN_SVN_REVISION})")
|
||||
@@ -12,7 +15,7 @@ else(EIGEN_SVN_REVISION)
|
||||
set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER}")
|
||||
endif(EIGEN_SVN_REVISION)
|
||||
|
||||
cmake_minimum_required(VERSION 2.6)
|
||||
cmake_minimum_required(VERSION 2.6.2)
|
||||
|
||||
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
|
||||
|
||||
|
||||
28
Eigen/LeastSquares
Normal file
28
Eigen/LeastSquares
Normal file
@@ -0,0 +1,28 @@
|
||||
#ifndef EIGEN_REGRESSION_MODULE_H
|
||||
#define EIGEN_REGRESSION_MODULE_H
|
||||
|
||||
#include "Core"
|
||||
|
||||
#include "src/Core/util/DisableMSVCWarnings.h"
|
||||
|
||||
#include "LU"
|
||||
#include "QR"
|
||||
#include "Geometry"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \defgroup Regression_Module Regression module
|
||||
* This module provides linear regression and related features.
|
||||
*
|
||||
* \code
|
||||
* #include <Eigen/Regression>
|
||||
* \endcode
|
||||
*/
|
||||
|
||||
#include "src/Regression/Regression.h"
|
||||
|
||||
} // namespace Eigen
|
||||
|
||||
#include "src/Core/util/EnableMSVCWarnings.h"
|
||||
|
||||
#endif // EIGEN_REGRESSION_MODULE_H
|
||||
@@ -1,28 +1,5 @@
|
||||
#ifndef EIGEN_REGRESSION_MODULE_H
|
||||
#define EIGEN_REGRESSION_MODULE_H
|
||||
#ifdef __GNUC__
|
||||
#warning "The Eigen/Regression header file has been renamed to Eigen/LeastSquares. The old name is deprecated, please update your code."
|
||||
#endif
|
||||
|
||||
#include "Core"
|
||||
|
||||
#include "src/Core/util/DisableMSVCWarnings.h"
|
||||
|
||||
#include "LU"
|
||||
#include "QR"
|
||||
#include "Geometry"
|
||||
|
||||
namespace Eigen {
|
||||
|
||||
/** \defgroup Regression_Module Regression module
|
||||
* This module provides linear regression and related features.
|
||||
*
|
||||
* \code
|
||||
* #include <Eigen/Regression>
|
||||
* \endcode
|
||||
*/
|
||||
|
||||
#include "src/Regression/Regression.h"
|
||||
|
||||
} // namespace Eigen
|
||||
|
||||
#include "src/Core/util/EnableMSVCWarnings.h"
|
||||
|
||||
#endif // EIGEN_REGRESSION_MODULE_H
|
||||
#include "LeastSquares"
|
||||
@@ -77,10 +77,8 @@ namespace Eigen {
|
||||
#include "src/Sparse/RandomSetter.h"
|
||||
#include "src/Sparse/SparseBlock.h"
|
||||
#include "src/Sparse/SparseMatrix.h"
|
||||
//#include "src/Sparse/HashMatrix.h"
|
||||
//#include "src/Sparse/LinkedVectorMatrix.h"
|
||||
#include "src/Sparse/SparseVector.h"
|
||||
#include "src/Sparse/CoreIterators.h"
|
||||
//#include "src/Sparse/SparseSetter.h"
|
||||
#include "src/Sparse/SparseProduct.h"
|
||||
#include "src/Sparse/TriangularSolver.h"
|
||||
#include "src/Sparse/SparseLLT.h"
|
||||
|
||||
@@ -95,9 +95,9 @@ static void ei_cache_friendly_product(
|
||||
const bool needRhsCopy = (PacketSize>1) && ((rhsStride%PacketSize!=0) || (size_t(rhs)%16!=0));
|
||||
Scalar* EIGEN_RESTRICT block = 0;
|
||||
const int allocBlockSize = l2BlockRows*size;
|
||||
block = ei_alloc_stack(Scalar, allocBlockSize);
|
||||
block = ei_aligned_stack_alloc(Scalar, allocBlockSize);
|
||||
Scalar* EIGEN_RESTRICT rhsCopy
|
||||
= ei_alloc_stack(Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
|
||||
= ei_aligned_stack_alloc(Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
|
||||
|
||||
// loops on each L2 cache friendly blocks of the result
|
||||
for(int l2i=0; l2i<rows; l2i+=l2BlockRows)
|
||||
@@ -338,8 +338,8 @@ static void ei_cache_friendly_product(
|
||||
}
|
||||
}
|
||||
|
||||
ei_free_stack(block, Scalar, allocBlockSize);
|
||||
ei_free_stack(rhsCopy, Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
|
||||
ei_aligned_stack_free(block, Scalar, allocBlockSize);
|
||||
ei_aligned_stack_free(rhsCopy, Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
|
||||
}
|
||||
|
||||
#endif // EIGEN_EXTERN_INSTANTIATIONS
|
||||
|
||||
@@ -40,7 +40,10 @@
|
||||
* \param _Cols Number of columns, or \b Dynamic
|
||||
*
|
||||
* The remaining template parameters are optional -- in most cases you don't have to worry about them.
|
||||
* \param _StorageOrder Either \b RowMajor or \b ColMajor. The default is \b ColMajor.
|
||||
* \param _Options A combination of either \b Matrix_RowMajor or \b Matrix_ColMajor, and of either
|
||||
* \b Matrix_AutoAlign or \b Matrix_DontAlign.
|
||||
* The former controls storage order, and defaults to column-major. The latter controls alignment, which is required
|
||||
* for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size.
|
||||
* \param _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note").
|
||||
* \param _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note").
|
||||
*
|
||||
@@ -85,7 +88,7 @@
|
||||
* to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time.
|
||||
*
|
||||
* Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime
|
||||
* variables, and the array of coefficients is allocated dynamically, typically on the heap (\ref alloca "note").
|
||||
* variables, and the array of coefficients is allocated dynamically on the heap.
|
||||
*
|
||||
* Note that \em dense matrices, be they Fixed-size or Dynamic-size, <em>do not</em> expand dynamically in the sense of a std::map.
|
||||
* If you want this behavior, see the Sparse module.</dd>
|
||||
@@ -97,15 +100,10 @@
|
||||
* exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols
|
||||
* are the dimensions of the original matrix, while _Rows and _Cols are Dynamic.</dd>
|
||||
*
|
||||
* <dt><b>\anchor alloca Usage of alloca():</b></dt>
|
||||
* <dd>On the Linux platform, for small enough arrays, Eigen will avoid heap allocation and instead will use alloca() to perform a dynamic
|
||||
* allocation on the stack.</dd>
|
||||
* </dl>
|
||||
*
|
||||
* \see MatrixBase for the majority of the API methods for matrices
|
||||
*/
|
||||
template<typename _Scalar, int _Rows, int _Cols, int _StorageOrder, int _MaxRows, int _MaxCols>
|
||||
struct ei_traits<Matrix<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols> >
|
||||
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
|
||||
struct ei_traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
enum {
|
||||
@@ -113,28 +111,34 @@ struct ei_traits<Matrix<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols
|
||||
ColsAtCompileTime = _Cols,
|
||||
MaxRowsAtCompileTime = _MaxRows,
|
||||
MaxColsAtCompileTime = _MaxCols,
|
||||
Flags = ei_compute_matrix_flags<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols>::ret,
|
||||
Flags = ei_compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret,
|
||||
CoeffReadCost = NumTraits<Scalar>::ReadCost,
|
||||
SupportedAccessPatterns = RandomAccessPattern
|
||||
};
|
||||
};
|
||||
|
||||
template<typename _Scalar, int _Rows, int _Cols, int _StorageOrder, int _MaxRows, int _MaxCols>
|
||||
template<typename T, int Rows, int Cols, int Options,
|
||||
bool NeedsToAlign = ((Options&Matrix_AutoAlign) == Matrix_AutoAlign) && Rows!=Dynamic && Cols!=Dynamic && ((sizeof(T)*Rows*Cols)%16==0)>
|
||||
struct ei_matrix_with_aligned_operator_new : WithAlignedOperatorNew {};
|
||||
|
||||
template<typename T, int Rows, int Cols, int Options>
|
||||
struct ei_matrix_with_aligned_operator_new<T, Rows, Cols, Options, false> {};
|
||||
|
||||
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
|
||||
class Matrix
|
||||
: public MatrixBase<Matrix<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols> >
|
||||
#ifdef EIGEN_VECTORIZE
|
||||
, public ei_with_aligned_operator_new<_Scalar,ei_size_at_compile_time<_Rows,_Cols>::ret>
|
||||
#endif
|
||||
: public MatrixBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >
|
||||
, public ei_matrix_with_aligned_operator_new<_Scalar, _Rows, _Cols, _Options>
|
||||
{
|
||||
public:
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(Matrix)
|
||||
enum { Options = _Options };
|
||||
friend class Eigen::Map<Matrix, Unaligned>;
|
||||
typedef class Eigen::Map<Matrix, Unaligned> UnalignedMapType;
|
||||
friend class Eigen::Map<Matrix, Aligned>;
|
||||
typedef class Eigen::Map<Matrix, Aligned> AlignedMapType;
|
||||
|
||||
protected:
|
||||
ei_matrix_storage<Scalar, MaxSizeAtCompileTime, RowsAtCompileTime, ColsAtCompileTime> m_storage;
|
||||
ei_matrix_storage<Scalar, MaxSizeAtCompileTime, RowsAtCompileTime, ColsAtCompileTime, Options> m_storage;
|
||||
|
||||
public:
|
||||
|
||||
|
||||
@@ -26,6 +26,27 @@
|
||||
#ifndef EIGEN_MATRIXSTORAGE_H
|
||||
#define EIGEN_MATRIXSTORAGE_H
|
||||
|
||||
/** \internal
|
||||
* Static array automatically aligned if the total byte size is a multiple of 16 and the matrix options require auto alignment
|
||||
*/
|
||||
template <typename T, int Size, int MatrixOptions,
|
||||
bool Align = (MatrixOptions&Matrix_AutoAlign) && (((Size*sizeof(T))&0xf)==0)
|
||||
> struct ei_matrix_array
|
||||
{
|
||||
EIGEN_ALIGN_128 T array[Size];
|
||||
|
||||
ei_matrix_array()
|
||||
{
|
||||
ei_assert((reinterpret_cast<size_t>(array) & 0xf) == 0
|
||||
&& "this assertion is explained here: http://eigen.tuxfamily.org/api/UnalignedArrayAssert.html **** READ THIS WEB PAGE !!! ****");
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T, int Size, int MatrixOptions> struct ei_matrix_array<T,Size,MatrixOptions,false>
|
||||
{
|
||||
T array[Size];
|
||||
};
|
||||
|
||||
/** \internal
|
||||
*
|
||||
* \class ei_matrix_storage
|
||||
@@ -37,12 +58,12 @@
|
||||
*
|
||||
* \sa Matrix
|
||||
*/
|
||||
template<typename T, int Size, int _Rows, int _Cols> class ei_matrix_storage;
|
||||
template<typename T, int Size, int _Rows, int _Cols, int _Options> class ei_matrix_storage;
|
||||
|
||||
// purely fixed-size matrix
|
||||
template<typename T, int Size, int _Rows, int _Cols> class ei_matrix_storage
|
||||
template<typename T, int Size, int _Rows, int _Cols, int _Options> class ei_matrix_storage
|
||||
{
|
||||
ei_aligned_array<T,Size,((Size*sizeof(T))%16)==0> m_data;
|
||||
ei_matrix_array<T,Size,_Options> m_data;
|
||||
public:
|
||||
inline explicit ei_matrix_storage() {}
|
||||
inline ei_matrix_storage(int,int,int) {}
|
||||
@@ -55,9 +76,9 @@ template<typename T, int Size, int _Rows, int _Cols> class ei_matrix_storage
|
||||
};
|
||||
|
||||
// dynamic-size matrix with fixed-size storage
|
||||
template<typename T, int Size> class ei_matrix_storage<T, Size, Dynamic, Dynamic>
|
||||
template<typename T, int Size, int _Options> class ei_matrix_storage<T, Size, Dynamic, Dynamic, _Options>
|
||||
{
|
||||
ei_aligned_array<T,Size,((Size*sizeof(T))%16)==0> m_data;
|
||||
ei_matrix_array<T,Size,_Options> m_data;
|
||||
int m_rows;
|
||||
int m_cols;
|
||||
public:
|
||||
@@ -78,9 +99,9 @@ template<typename T, int Size> class ei_matrix_storage<T, Size, Dynamic, Dynamic
|
||||
};
|
||||
|
||||
// dynamic-size matrix with fixed-size storage and fixed width
|
||||
template<typename T, int Size, int _Cols> class ei_matrix_storage<T, Size, Dynamic, _Cols>
|
||||
template<typename T, int Size, int _Cols, int _Options> class ei_matrix_storage<T, Size, Dynamic, _Cols, _Options>
|
||||
{
|
||||
ei_aligned_array<T,Size,((Size*sizeof(T))%16)==0> m_data;
|
||||
ei_matrix_array<T,Size,_Options> m_data;
|
||||
int m_rows;
|
||||
public:
|
||||
inline explicit ei_matrix_storage() : m_rows(0) {}
|
||||
@@ -98,9 +119,9 @@ template<typename T, int Size, int _Cols> class ei_matrix_storage<T, Size, Dynam
|
||||
};
|
||||
|
||||
// dynamic-size matrix with fixed-size storage and fixed height
|
||||
template<typename T, int Size, int _Rows> class ei_matrix_storage<T, Size, _Rows, Dynamic>
|
||||
template<typename T, int Size, int _Rows, int _Options> class ei_matrix_storage<T, Size, _Rows, Dynamic, _Options>
|
||||
{
|
||||
ei_aligned_array<T,Size,((Size*sizeof(T))%16)==0> m_data;
|
||||
ei_matrix_array<T,Size,_Options> m_data;
|
||||
int m_cols;
|
||||
public:
|
||||
inline explicit ei_matrix_storage() : m_cols(0) {}
|
||||
@@ -109,7 +130,7 @@ template<typename T, int Size, int _Rows> class ei_matrix_storage<T, Size, _Rows
|
||||
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
|
||||
inline int rows(void) const {return _Rows;}
|
||||
inline int cols(void) const {return m_cols;}
|
||||
inline void resize(int size, int, int cols)
|
||||
inline void resize(int, int, int cols)
|
||||
{
|
||||
m_cols = cols;
|
||||
}
|
||||
@@ -118,7 +139,7 @@ template<typename T, int Size, int _Rows> class ei_matrix_storage<T, Size, _Rows
|
||||
};
|
||||
|
||||
// purely dynamic matrix.
|
||||
template<typename T> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic>
|
||||
template<typename T, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic, _Options>
|
||||
{
|
||||
T *m_data;
|
||||
int m_rows;
|
||||
@@ -127,7 +148,7 @@ template<typename T> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic>
|
||||
inline explicit ei_matrix_storage() : m_data(0), m_rows(0), m_cols(0) {}
|
||||
inline ei_matrix_storage(int size, int rows, int cols)
|
||||
: m_data(ei_aligned_malloc<T>(size)), m_rows(rows), m_cols(cols) {}
|
||||
inline ~ei_matrix_storage() { ei_aligned_free(m_data); }
|
||||
inline ~ei_matrix_storage() { ei_aligned_free(m_data, m_rows*m_cols); }
|
||||
inline void swap(ei_matrix_storage& other)
|
||||
{ std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
|
||||
inline int rows(void) const {return m_rows;}
|
||||
@@ -136,7 +157,7 @@ template<typename T> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic>
|
||||
{
|
||||
if(size != m_rows*m_cols)
|
||||
{
|
||||
ei_aligned_free(m_data);
|
||||
ei_aligned_free(m_data, m_rows*m_cols);
|
||||
m_data = ei_aligned_malloc<T>(size);
|
||||
}
|
||||
m_rows = rows;
|
||||
@@ -147,14 +168,14 @@ template<typename T> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic>
|
||||
};
|
||||
|
||||
// matrix with dynamic width and fixed height (so that matrix has dynamic size).
|
||||
template<typename T, int _Rows> class ei_matrix_storage<T, Dynamic, _Rows, Dynamic>
|
||||
template<typename T, int _Rows, int _Options> class ei_matrix_storage<T, Dynamic, _Rows, Dynamic, _Options>
|
||||
{
|
||||
T *m_data;
|
||||
int m_cols;
|
||||
public:
|
||||
inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {}
|
||||
inline ei_matrix_storage(int size, int, int cols) : m_data(ei_aligned_malloc<T>(size)), m_cols(cols) {}
|
||||
inline ~ei_matrix_storage() { ei_aligned_free(m_data); }
|
||||
inline ~ei_matrix_storage() { ei_aligned_free(m_data, _Rows*m_cols); }
|
||||
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
|
||||
inline static int rows(void) {return _Rows;}
|
||||
inline int cols(void) const {return m_cols;}
|
||||
@@ -162,7 +183,7 @@ template<typename T, int _Rows> class ei_matrix_storage<T, Dynamic, _Rows, Dynam
|
||||
{
|
||||
if(size != _Rows*m_cols)
|
||||
{
|
||||
ei_aligned_free(m_data);
|
||||
ei_aligned_free(m_data, _Rows*m_cols);
|
||||
m_data = ei_aligned_malloc<T>(size);
|
||||
}
|
||||
m_cols = cols;
|
||||
@@ -172,14 +193,14 @@ template<typename T, int _Rows> class ei_matrix_storage<T, Dynamic, _Rows, Dynam
|
||||
};
|
||||
|
||||
// matrix with dynamic height and fixed width (so that matrix has dynamic size).
|
||||
template<typename T, int _Cols> class ei_matrix_storage<T, Dynamic, Dynamic, _Cols>
|
||||
template<typename T, int _Cols, int _Options> class ei_matrix_storage<T, Dynamic, Dynamic, _Cols, _Options>
|
||||
{
|
||||
T *m_data;
|
||||
int m_rows;
|
||||
public:
|
||||
inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {}
|
||||
inline ei_matrix_storage(int size, int rows, int) : m_data(ei_aligned_malloc<T>(size)), m_rows(rows) {}
|
||||
inline ~ei_matrix_storage() { ei_aligned_free(m_data); }
|
||||
inline ~ei_matrix_storage() { ei_aligned_free(m_data, _Cols*m_rows); }
|
||||
inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
|
||||
inline int rows(void) const {return m_rows;}
|
||||
inline static int cols(void) {return _Cols;}
|
||||
@@ -187,7 +208,7 @@ template<typename T, int _Cols> class ei_matrix_storage<T, Dynamic, Dynamic, _Co
|
||||
{
|
||||
if(size != m_rows*_Cols)
|
||||
{
|
||||
ei_aligned_free(m_data);
|
||||
ei_aligned_free(m_data, _Cols*m_rows);
|
||||
m_data = ei_aligned_malloc<T>(size);
|
||||
}
|
||||
m_rows = rows;
|
||||
|
||||
@@ -89,9 +89,9 @@ template<typename Lhs, typename Rhs> struct ei_product_mode
|
||||
? DiagonalProduct
|
||||
: (Rhs::Flags & Lhs::Flags & SparseBit)
|
||||
? SparseProduct
|
||||
: Lhs::MaxColsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
|
||||
&& ( Lhs::MaxRowsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
|
||||
|| Rhs::MaxColsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD )
|
||||
: Lhs::MaxColsAtCompileTime == Dynamic
|
||||
&& ( Lhs::MaxRowsAtCompileTime == Dynamic
|
||||
|| Rhs::MaxColsAtCompileTime == Dynamic )
|
||||
&& (!(Rhs::IsVectorAtCompileTime && (Lhs::Flags&RowMajorBit) && (!(Lhs::Flags&DirectAccessBit))))
|
||||
&& (!(Lhs::IsVectorAtCompileTime && (!(Rhs::Flags&RowMajorBit)) && (!(Rhs::Flags&DirectAccessBit))))
|
||||
&& (ei_is_same_type<typename Lhs::Scalar, typename Rhs::Scalar>::ret)
|
||||
@@ -573,7 +573,7 @@ struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,HasDirect
|
||||
_res = &res.coeffRef(0);
|
||||
else
|
||||
{
|
||||
_res = ei_alloc_stack(Scalar,res.size());
|
||||
_res = ei_aligned_stack_alloc(Scalar,res.size());
|
||||
Map<Matrix<Scalar,DestDerived::RowsAtCompileTime,1> >(_res, res.size()) = res;
|
||||
}
|
||||
ei_cache_friendly_product_colmajor_times_vector(res.size(),
|
||||
@@ -583,7 +583,7 @@ struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,HasDirect
|
||||
if (!EvalToRes)
|
||||
{
|
||||
res = Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size());
|
||||
ei_free_stack(_res, Scalar, res.size());
|
||||
ei_aligned_stack_free(_res, Scalar, res.size());
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -619,7 +619,7 @@ struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCo
|
||||
_res = &res.coeffRef(0);
|
||||
else
|
||||
{
|
||||
_res = ei_alloc_stack(Scalar, res.size());
|
||||
_res = ei_aligned_stack_alloc(Scalar, res.size());
|
||||
Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size()) = res;
|
||||
}
|
||||
ei_cache_friendly_product_colmajor_times_vector(res.size(),
|
||||
@@ -629,7 +629,7 @@ struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCo
|
||||
if (!EvalToRes)
|
||||
{
|
||||
res = Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size());
|
||||
ei_free_stack(_res, Scalar, res.size());
|
||||
ei_aligned_stack_free(_res, Scalar, res.size());
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -652,13 +652,13 @@ struct ei_cache_friendly_product_selector<ProductType,LhsRows,RowMajor,HasDirect
|
||||
_rhs = &product.rhs().const_cast_derived().coeffRef(0);
|
||||
else
|
||||
{
|
||||
_rhs = ei_alloc_stack(Scalar, product.rhs().size());
|
||||
_rhs = ei_aligned_stack_alloc(Scalar, product.rhs().size());
|
||||
Map<Matrix<Scalar,Rhs::SizeAtCompileTime,1> >(_rhs, product.rhs().size()) = product.rhs();
|
||||
}
|
||||
ei_cache_friendly_product_rowmajor_times_vector(&product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(),
|
||||
_rhs, product.rhs().size(), res);
|
||||
|
||||
if (!UseRhsDirectly) ei_free_stack(_rhs, Scalar, product.rhs().size());
|
||||
if (!UseRhsDirectly) ei_aligned_stack_free(_rhs, Scalar, product.rhs().size());
|
||||
}
|
||||
};
|
||||
|
||||
@@ -680,13 +680,13 @@ struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCo
|
||||
_lhs = &product.lhs().const_cast_derived().coeffRef(0);
|
||||
else
|
||||
{
|
||||
_lhs = ei_alloc_stack(Scalar, product.lhs().size());
|
||||
_lhs = ei_aligned_stack_alloc(Scalar, product.lhs().size());
|
||||
Map<Matrix<Scalar,Lhs::SizeAtCompileTime,1> >(_lhs, product.lhs().size()) = product.lhs();
|
||||
}
|
||||
ei_cache_friendly_product_rowmajor_times_vector(&product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(),
|
||||
_lhs, product.lhs().size(), res);
|
||||
|
||||
if(!UseLhsDirectly) ei_free_stack(_lhs, Scalar, product.lhs().size());
|
||||
if(!UseLhsDirectly) ei_aligned_stack_free(_lhs, Scalar, product.lhs().size());
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -223,8 +223,15 @@ enum {
|
||||
};
|
||||
|
||||
enum {
|
||||
ColMajor = 0,
|
||||
RowMajor = RowMajorBit
|
||||
Matrix_ColMajor = 0,
|
||||
Matrix_RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that
|
||||
/** \internal Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be
|
||||
requested to be aligned) */
|
||||
ColMajor = Matrix_ColMajor, // deprecated
|
||||
RowMajor = Matrix_RowMajor, // deprecated
|
||||
Matrix_DontAlign = 0,
|
||||
/** \internal Align the matrix itself */
|
||||
Matrix_AutoAlign = 0x2
|
||||
};
|
||||
|
||||
enum {
|
||||
|
||||
@@ -28,7 +28,8 @@
|
||||
template<typename T> struct ei_traits;
|
||||
template<typename T> struct NumTraits;
|
||||
|
||||
template<typename _Scalar, int _Rows, int _Cols, int _StorageOrder = ColMajor,
|
||||
template<typename _Scalar, int _Rows, int _Cols,
|
||||
int _Options = EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION | Matrix_AutoAlign,
|
||||
int _MaxRows = _Rows, int _MaxCols = _Cols> class Matrix;
|
||||
|
||||
template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
|
||||
|
||||
@@ -28,6 +28,20 @@
|
||||
|
||||
#undef minor
|
||||
|
||||
#define EIGEN_WORLD_VERSION 2
|
||||
#define EIGEN_MAJOR_VERSION 0
|
||||
#define EIGEN_MINOR_VERSION 0
|
||||
|
||||
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
|
||||
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
|
||||
EIGEN_MINOR_VERSION>=z))))
|
||||
|
||||
#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
|
||||
#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Matrix_RowMajor
|
||||
#else
|
||||
#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Matrix_ColMajor
|
||||
#endif
|
||||
|
||||
/** \internal Defines the maximal loop size to enable meta unrolling of loops.
|
||||
* Note that the value here is expressed in Eigen's own notion of "number of FLOPS",
|
||||
* it does not correspond to the number of iterations or the number of instructions
|
||||
|
||||
@@ -26,35 +26,11 @@
|
||||
#ifndef EIGEN_MEMORY_H
|
||||
#define EIGEN_MEMORY_H
|
||||
|
||||
#if defined(EIGEN_VECTORIZE) && !defined(_MSC_VER)
|
||||
#ifdef __linux
|
||||
// it seems we cannot assume posix_memalign is defined in the stdlib header
|
||||
extern "C" int posix_memalign (void **, size_t, size_t) throw ();
|
||||
#endif
|
||||
|
||||
/** \internal
|
||||
* Static array automatically aligned if the total byte size is a multiple of 16
|
||||
*/
|
||||
template <typename T, int Size, bool Align> struct ei_aligned_array
|
||||
{
|
||||
EIGEN_ALIGN_128 T array[Size];
|
||||
|
||||
ei_aligned_array()
|
||||
{
|
||||
#ifdef EIGEN_VECTORIZE // we only want this assertion if EIGEN_VECTORIZE is defined.
|
||||
// indeed, if it's not defined then WithAlignedOperatorNew is empty and hence there's not much point
|
||||
// requiring the user to inherit it! Would be best practice, but we already decided at several places
|
||||
// to only do special alignment if vectorization is enabled.
|
||||
ei_assert((reinterpret_cast<size_t>(array) & 0xf) == 0
|
||||
&& "this assertion is explained here: http://eigen.tuxfamily.org/api/UnalignedArrayAssert.html **** READ THIS WEB PAGE !!! ****");
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T, int Size> struct ei_aligned_array<T,Size,false>
|
||||
{
|
||||
T array[Size];
|
||||
};
|
||||
|
||||
struct ei_byte_forcing_aligned_malloc
|
||||
{
|
||||
unsigned char c; // sizeof must be 1.
|
||||
@@ -69,59 +45,64 @@ template<> struct ei_force_aligned_malloc<ei_byte_forcing_aligned_malloc> { enum
|
||||
template<typename T>
|
||||
inline T* ei_aligned_malloc(size_t size)
|
||||
{
|
||||
T* result;
|
||||
|
||||
#ifdef EIGEN_VECTORIZE
|
||||
if(ei_packet_traits<T>::size>1 || ei_force_aligned_malloc<T>::ret)
|
||||
{
|
||||
void *void_result;
|
||||
#ifdef __linux
|
||||
#ifdef EIGEN_EXCEPTIONS
|
||||
const int failed =
|
||||
#endif
|
||||
posix_memalign(reinterpret_cast<void**>(&result), 16, size*sizeof(T));
|
||||
posix_memalign(&void_result, 16, size*sizeof(T));
|
||||
#else
|
||||
#ifdef _MSC_VER
|
||||
result = static_cast<T*>(_aligned_malloc(size*sizeof(T), 16));
|
||||
void_result = _aligned_malloc(size*sizeof(T), 16);
|
||||
#else
|
||||
result = static_cast<T*>(_mm_malloc(size*sizeof(T),16));
|
||||
void_result = _mm_malloc(size*sizeof(T), 16);
|
||||
#endif
|
||||
|
||||
#ifdef EIGEN_EXCEPTIONS
|
||||
const int failed = (result == 0);
|
||||
const int failed = (void_result == 0);
|
||||
#endif
|
||||
#endif
|
||||
#ifdef EIGEN_EXCEPTIONS
|
||||
if(failed)
|
||||
throw std::bad_alloc();
|
||||
#endif
|
||||
// if the user uses Eigen on some fancy scalar type such as multiple-precision numbers,
|
||||
// and this type has a custom operator new, then we want to honor this operator new!
|
||||
// so when we use C functions to allocate memory, we must be careful to call manually the constructor using
|
||||
// the special placement-new syntax.
|
||||
return new(void_result) T[size];
|
||||
}
|
||||
else
|
||||
#endif
|
||||
result = new T[size]; // here we really want a new, not a malloc. Justification: if the user uses Eigen on
|
||||
return new T[size]; // here we really want a new, not a malloc. Justification: if the user uses Eigen on
|
||||
// some fancy scalar type such as multiple-precision numbers, and this type has a custom operator new,
|
||||
// then we want to honor this operator new! Anyway this type won't have vectorization so the vectorizing path
|
||||
// is irrelevant here. Yes, we should say somewhere in the docs that if the user uses a custom scalar type then
|
||||
// he can't have both vectorization and a custom operator new on his scalar type.
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/** \internal free memory allocated with ei_aligned_malloc */
|
||||
/** \internal free memory allocated with ei_aligned_malloc
|
||||
* The \a size parameter is used to determine on how many elements to call the destructor. If you don't
|
||||
* want any destructor to be called, just pass 0.
|
||||
*/
|
||||
template<typename T>
|
||||
inline void ei_aligned_free(T* ptr)
|
||||
inline void ei_aligned_free(T* ptr, size_t size)
|
||||
{
|
||||
#ifdef EIGEN_VECTORIZE
|
||||
if (ei_packet_traits<T>::size>1 || ei_force_aligned_malloc<T>::ret)
|
||||
#if defined(__linux)
|
||||
free(ptr);
|
||||
#elif defined(_MSC_VER)
|
||||
_aligned_free(ptr);
|
||||
#else
|
||||
_mm_free(ptr);
|
||||
#endif
|
||||
{
|
||||
// need to call manually the dtor in case T is some user-defined fancy numeric type.
|
||||
// always destruct an array starting from the end.
|
||||
while(size) ptr[--size].~T();
|
||||
#if defined(__linux)
|
||||
free(ptr);
|
||||
#elif defined(_MSC_VER)
|
||||
_aligned_free(ptr);
|
||||
#else
|
||||
_mm_free(ptr);
|
||||
#endif
|
||||
}
|
||||
else
|
||||
#endif
|
||||
delete[] ptr;
|
||||
delete[] ptr;
|
||||
}
|
||||
|
||||
/** \internal \returns the number of elements which have to be skipped such that data are 16 bytes aligned */
|
||||
@@ -139,22 +120,24 @@ inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset)
|
||||
}
|
||||
|
||||
/** \internal
|
||||
* ei_alloc_stack(TYPE,SIZE) allocates sizeof(TYPE)*SIZE bytes on the stack if sizeof(TYPE)*SIZE is
|
||||
* smaller than EIGEN_STACK_ALLOCATION_LIMIT. Otherwise the memory is allocated using the operator new.
|
||||
* Data allocated with ei_alloc_stack \b must be freed by calling ei_free_stack(PTR,TYPE,SIZE).
|
||||
* ei_aligned_stack_alloc(TYPE,SIZE) allocates an aligned buffer of sizeof(TYPE)*SIZE bytes
|
||||
* on the stack if sizeof(TYPE)*SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT.
|
||||
* Otherwise the memory is allocated on the heap.
|
||||
* Data allocated with ei_aligned_stack_alloc \b must be freed by calling ei_aligned_stack_free(PTR,TYPE,SIZE).
|
||||
* \code
|
||||
* float * data = ei_alloc_stack(float,array.size());
|
||||
* float * data = ei_aligned_stack_alloc(float,array.size());
|
||||
* // ...
|
||||
* ei_free_stack(data,float,array.size());
|
||||
* ei_aligned_stack_free(data,float,array.size());
|
||||
* \endcode
|
||||
*/
|
||||
#ifdef __linux__
|
||||
#define ei_alloc_stack(TYPE,SIZE) ((sizeof(TYPE)*(SIZE)>EIGEN_STACK_ALLOCATION_LIMIT) ? \
|
||||
new TYPE[SIZE] : (TYPE*)alloca(sizeof(TYPE)*(SIZE)))
|
||||
#define ei_free_stack(PTR,TYPE,SIZE) if (sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT) delete[] PTR
|
||||
#define ei_aligned_stack_alloc(TYPE,SIZE) ((sizeof(TYPE)*(SIZE)>EIGEN_STACK_ALLOCATION_LIMIT) \
|
||||
? ei_aligned_malloc<TYPE>(SIZE) \
|
||||
: (TYPE*)alloca(sizeof(TYPE)*(SIZE)))
|
||||
#define ei_aligned_stack_free(PTR,TYPE,SIZE) if (sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT) ei_aligned_free(PTR,SIZE)
|
||||
#else
|
||||
#define ei_alloc_stack(TYPE,SIZE) new TYPE[SIZE]
|
||||
#define ei_free_stack(PTR,TYPE,SIZE) delete[] PTR
|
||||
#define ei_aligned_stack_alloc(TYPE,SIZE) ei_aligned_malloc<TYPE>(SIZE)
|
||||
#define ei_aligned_stack_free(PTR,TYPE,SIZE) ei_aligned_free(PTR,SIZE)
|
||||
#endif
|
||||
|
||||
/** \class WithAlignedOperatorNew
|
||||
@@ -198,8 +181,6 @@ inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset)
|
||||
*/
|
||||
struct WithAlignedOperatorNew
|
||||
{
|
||||
#ifdef EIGEN_VECTORIZE
|
||||
|
||||
void *operator new(size_t size) throw()
|
||||
{
|
||||
return ei_aligned_malloc<ei_byte_forcing_aligned_malloc>(size);
|
||||
@@ -210,10 +191,8 @@ struct WithAlignedOperatorNew
|
||||
return ei_aligned_malloc<ei_byte_forcing_aligned_malloc>(size);
|
||||
}
|
||||
|
||||
void operator delete(void * ptr) { ei_aligned_free(static_cast<ei_byte_forcing_aligned_malloc *>(ptr)); }
|
||||
void operator delete[](void * ptr) { ei_aligned_free(static_cast<ei_byte_forcing_aligned_malloc *>(ptr)); }
|
||||
|
||||
#endif
|
||||
void operator delete(void * ptr) { ei_aligned_free(static_cast<ei_byte_forcing_aligned_malloc *>(ptr), 0); }
|
||||
void operator delete[](void * ptr) { ei_aligned_free(static_cast<ei_byte_forcing_aligned_malloc *>(ptr), 0); }
|
||||
};
|
||||
|
||||
template<typename T, int SizeAtCompileTime,
|
||||
|
||||
@@ -70,7 +70,8 @@
|
||||
INVALID_MATRIX_PRODUCT,
|
||||
INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS,
|
||||
INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION,
|
||||
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY
|
||||
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY,
|
||||
THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -85,22 +85,16 @@ template<typename T> struct ei_unpacket_traits
|
||||
enum {size=1};
|
||||
};
|
||||
|
||||
|
||||
template<typename Scalar, int Rows, int Cols, int StorageOrder, int MaxRows, int MaxCols>
|
||||
template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
|
||||
class ei_compute_matrix_flags
|
||||
{
|
||||
enum {
|
||||
row_major_bit = (Rows != 1 && Cols != 1) // if this is not a vector,
|
||||
// then the storage order really matters,
|
||||
// so let us strictly honor the user's choice.
|
||||
? StorageOrder
|
||||
: Cols > 1 ? RowMajorBit : 0,
|
||||
row_major_bit = Options&Matrix_RowMajor ? RowMajorBit : 0,
|
||||
inner_max_size = row_major_bit ? MaxCols : MaxRows,
|
||||
is_big = inner_max_size == Dynamic,
|
||||
is_packet_size_multiple = (Cols * Rows)%ei_packet_traits<Scalar>::size==0,
|
||||
packet_access_bit = ei_packet_traits<Scalar>::size > 1
|
||||
&& (is_big || is_packet_size_multiple) ? PacketAccessBit : 0,
|
||||
aligned_bit = packet_access_bit && (is_big || is_packet_size_multiple) ? AlignedBit : 0
|
||||
is_packet_size_multiple = (Cols*Rows) % ei_packet_traits<Scalar>::size == 0,
|
||||
aligned_bit = ((Options&Matrix_AutoAlign) && (is_big || is_packet_size_multiple)) ? AlignedBit : 0,
|
||||
packet_access_bit = ei_packet_traits<Scalar>::size > 1 && aligned_bit ? PacketAccessBit : 0
|
||||
};
|
||||
|
||||
public:
|
||||
@@ -123,7 +117,7 @@ template<typename T> struct ei_eval<T,IsDense>
|
||||
typedef Matrix<typename ei_traits<T>::Scalar,
|
||||
ei_traits<T>::RowsAtCompileTime,
|
||||
ei_traits<T>::ColsAtCompileTime,
|
||||
ei_traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor,
|
||||
Matrix_AutoAlign | (ei_traits<T>::Flags&RowMajorBit ? Matrix_RowMajor : Matrix_ColMajor),
|
||||
ei_traits<T>::MaxRowsAtCompileTime,
|
||||
ei_traits<T>::MaxColsAtCompileTime
|
||||
> type;
|
||||
@@ -144,7 +138,7 @@ template<typename T> struct ei_plain_matrix_type
|
||||
typedef Matrix<typename ei_traits<T>::Scalar,
|
||||
ei_traits<T>::RowsAtCompileTime,
|
||||
ei_traits<T>::ColsAtCompileTime,
|
||||
ei_traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor,
|
||||
Matrix_AutoAlign | (ei_traits<T>::Flags&RowMajorBit ? Matrix_RowMajor : Matrix_ColMajor),
|
||||
ei_traits<T>::MaxRowsAtCompileTime,
|
||||
ei_traits<T>::MaxColsAtCompileTime
|
||||
> type;
|
||||
@@ -157,7 +151,7 @@ template<typename T> struct ei_plain_matrix_type_column_major
|
||||
typedef Matrix<typename ei_traits<T>::Scalar,
|
||||
ei_traits<T>::RowsAtCompileTime,
|
||||
ei_traits<T>::ColsAtCompileTime,
|
||||
ColMajor,
|
||||
Matrix_AutoAlign | Matrix_ColMajor,
|
||||
ei_traits<T>::MaxRowsAtCompileTime,
|
||||
ei_traits<T>::MaxColsAtCompileTime
|
||||
> type;
|
||||
|
||||
@@ -76,7 +76,7 @@ template<typename MatrixType> class LU
|
||||
MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix" is the number of cols of the original matrix
|
||||
// so that the product "matrix * kernel = zero" makes sense
|
||||
Dynamic, // we don't know at compile-time the dimension of the kernel
|
||||
MatrixType::Flags&RowMajorBit,
|
||||
MatrixType::Options,
|
||||
MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
|
||||
MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space, whose dimension is the number
|
||||
// of columns of the original matrix
|
||||
@@ -86,7 +86,7 @@ template<typename MatrixType> class LU
|
||||
MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose dimension is the number
|
||||
// of rows of the original matrix
|
||||
Dynamic, // we don't know at compile time the dimension of the image (the rank)
|
||||
MatrixType::Flags,
|
||||
MatrixType::Options,
|
||||
MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
|
||||
MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns.
|
||||
> ImageResultType;
|
||||
@@ -436,7 +436,7 @@ void LU<MatrixType>::computeKernel(KernelMatrixType *result) const
|
||||
* independent vectors in Ker U.
|
||||
*/
|
||||
|
||||
Matrix<Scalar, Dynamic, Dynamic, MatrixType::Flags&RowMajorBit,
|
||||
Matrix<Scalar, Dynamic, Dynamic, MatrixType::Options,
|
||||
MatrixType::MaxColsAtCompileTime, MatrixType::MaxColsAtCompileTime>
|
||||
y(-m_lu.corner(TopRight, m_rank, dimker));
|
||||
|
||||
@@ -504,7 +504,7 @@ bool LU<MatrixType>::solve(
|
||||
|
||||
// Step 2
|
||||
Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime,
|
||||
MatrixType::Flags&RowMajorBit,
|
||||
MatrixType::Options,
|
||||
MatrixType::MaxRowsAtCompileTime,
|
||||
MatrixType::MaxRowsAtCompileTime> l(rows, rows);
|
||||
l.setZero();
|
||||
@@ -523,7 +523,7 @@ bool LU<MatrixType>::solve(
|
||||
return false;
|
||||
}
|
||||
Matrix<Scalar, Dynamic, OtherDerived::ColsAtCompileTime,
|
||||
MatrixType::Flags&RowMajorBit,
|
||||
MatrixType::Options,
|
||||
MatrixType::MaxRowsAtCompileTime, OtherDerived::MaxColsAtCompileTime>
|
||||
d(c.corner(TopLeft, m_rank, c.cols()));
|
||||
m_lu.corner(TopLeft, m_rank, m_rank)
|
||||
|
||||
@@ -25,6 +25,35 @@
|
||||
#ifndef EIGEN_CHOLMODSUPPORT_H
|
||||
#define EIGEN_CHOLMODSUPPORT_H
|
||||
|
||||
template<typename Scalar, typename CholmodType>
|
||||
void ei_cholmod_configure_matrix(CholmodType& mat)
|
||||
{
|
||||
if (ei_is_same_type<Scalar,float>::ret)
|
||||
{
|
||||
mat.xtype = CHOLMOD_REAL;
|
||||
mat.dtype = 1;
|
||||
}
|
||||
else if (ei_is_same_type<Scalar,double>::ret)
|
||||
{
|
||||
mat.xtype = CHOLMOD_REAL;
|
||||
mat.dtype = 0;
|
||||
}
|
||||
else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
|
||||
{
|
||||
mat.xtype = CHOLMOD_COMPLEX;
|
||||
mat.dtype = 1;
|
||||
}
|
||||
else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
|
||||
{
|
||||
mat.xtype = CHOLMOD_COMPLEX;
|
||||
mat.dtype = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
ei_assert(false && "Scalar type not supported by CHOLMOD");
|
||||
}
|
||||
}
|
||||
|
||||
template<typename Scalar, int Flags>
|
||||
cholmod_sparse SparseMatrix<Scalar,Flags>::asCholmodMatrix()
|
||||
{
|
||||
@@ -42,30 +71,7 @@ cholmod_sparse SparseMatrix<Scalar,Flags>::asCholmodMatrix()
|
||||
res.dtype = 0;
|
||||
res.stype = -1;
|
||||
|
||||
if (ei_is_same_type<Scalar,float>::ret)
|
||||
{
|
||||
res.xtype = CHOLMOD_REAL;
|
||||
res.dtype = 1;
|
||||
}
|
||||
else if (ei_is_same_type<Scalar,double>::ret)
|
||||
{
|
||||
res.xtype = CHOLMOD_REAL;
|
||||
res.dtype = 0;
|
||||
}
|
||||
else if (ei_is_same_type<Scalar,std::complex<float> >::ret)
|
||||
{
|
||||
res.xtype = CHOLMOD_COMPLEX;
|
||||
res.dtype = 1;
|
||||
}
|
||||
else if (ei_is_same_type<Scalar,std::complex<double> >::ret)
|
||||
{
|
||||
res.xtype = CHOLMOD_COMPLEX;
|
||||
res.dtype = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
ei_assert(false && "Scalar type not supported by CHOLMOD");
|
||||
}
|
||||
ei_cholmod_configure_matrix<Scalar>(res);
|
||||
|
||||
if (Flags & SelfAdjoint)
|
||||
{
|
||||
@@ -82,6 +88,25 @@ cholmod_sparse SparseMatrix<Scalar,Flags>::asCholmodMatrix()
|
||||
return res;
|
||||
}
|
||||
|
||||
template<typename Derived>
|
||||
cholmod_dense ei_cholmod_map_eigen_to_dense(MatrixBase<Derived>& mat)
|
||||
{
|
||||
EIGEN_STATIC_ASSERT((ei_traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
|
||||
typedef typename Derived::Scalar Scalar;
|
||||
|
||||
cholmod_dense res;
|
||||
res.nrow = mat.rows();
|
||||
res.ncol = mat.cols();
|
||||
res.nzmax = res.nrow * res.ncol;
|
||||
res.d = mat.derived().stride();
|
||||
res.x = mat.derived().data();
|
||||
res.z = 0;
|
||||
|
||||
ei_cholmod_configure_matrix<Scalar>(res);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
template<typename Scalar, int Flags>
|
||||
SparseMatrix<Scalar,Flags> SparseMatrix<Scalar,Flags>::Map(cholmod_sparse& cm)
|
||||
{
|
||||
@@ -103,7 +128,7 @@ class SparseLLT<MatrixType,Cholmod> : public SparseLLT<MatrixType>
|
||||
{
|
||||
protected:
|
||||
typedef SparseLLT<MatrixType> Base;
|
||||
using Base::Scalar;
|
||||
using typename Base::Scalar;
|
||||
using Base::RealScalar;
|
||||
using Base::MatrixLIsDirty;
|
||||
using Base::SupernodalFactorIsDirty;
|
||||
@@ -155,11 +180,12 @@ void SparseLLT<MatrixType,Cholmod>::compute(const MatrixType& a)
|
||||
}
|
||||
|
||||
cholmod_sparse A = const_cast<MatrixType&>(a).asCholmodMatrix();
|
||||
m_cholmod.supernodal = CHOLMOD_AUTO;
|
||||
// TODO
|
||||
if (m_flags&IncompleteFactorization)
|
||||
{
|
||||
m_cholmod.nmethods = 1;
|
||||
m_cholmod.method [0].ordering = CHOLMOD_NATURAL;
|
||||
m_cholmod.method[0].ordering = CHOLMOD_NATURAL;
|
||||
m_cholmod.postorder = 0;
|
||||
}
|
||||
else
|
||||
@@ -196,13 +222,19 @@ template<typename MatrixType>
|
||||
template<typename Derived>
|
||||
void SparseLLT<MatrixType,Cholmod>::solveInPlace(MatrixBase<Derived> &b) const
|
||||
{
|
||||
if (m_status & MatrixLIsDirty)
|
||||
matrixL();
|
||||
|
||||
const int size = m_matrix.rows();
|
||||
const int size = m_cholmodFactor->n;
|
||||
ei_assert(size==b.rows());
|
||||
|
||||
Base::solveInPlace(b);
|
||||
// this uses Eigen's triangular sparse solver
|
||||
// if (m_status & MatrixLIsDirty)
|
||||
// matrixL();
|
||||
// Base::solveInPlace(b);
|
||||
// as long as our own triangular sparse solver is not fully optimal,
|
||||
// let's use CHOLMOD's one:
|
||||
cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(b);
|
||||
cholmod_dense* x = cholmod_solve(CHOLMOD_LDLt, m_cholmodFactor, &cdb, &m_cholmod);
|
||||
b = Matrix<typename Base::Scalar,Dynamic,1>::Map(reinterpret_cast<typename Base::Scalar*>(x->x),b.rows());
|
||||
cholmod_free_dense(&x, &m_cholmod);
|
||||
}
|
||||
|
||||
#endif // EIGEN_CHOLMODSUPPORT_H
|
||||
|
||||
@@ -192,7 +192,7 @@ void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
|
||||
m_matrix.resize(size, size);
|
||||
m_parent.resize(size);
|
||||
m_nonZerosPerCol.resize(size);
|
||||
int * tags = ei_alloc_stack(int, size);
|
||||
int * tags = ei_aligned_stack_alloc(int, size);
|
||||
|
||||
const int* Ap = a._outerIndexPtr();
|
||||
const int* Ai = a._innerIndexPtr();
|
||||
@@ -238,7 +238,7 @@ void SparseLDLT<MatrixType,Backend>::_symbolic(const MatrixType& a)
|
||||
Lp[k+1] = Lp[k] + m_nonZerosPerCol[k];
|
||||
|
||||
m_matrix.resizeNonZeros(Lp[size]);
|
||||
ei_free_stack(tags, int, size);
|
||||
ei_aligned_stack_free(tags, int, size);
|
||||
}
|
||||
|
||||
template<typename MatrixType, int Backend>
|
||||
@@ -257,9 +257,9 @@ bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
|
||||
Scalar* Lx = m_matrix._valuePtr();
|
||||
m_diag.resize(size);
|
||||
|
||||
Scalar * y = ei_alloc_stack(Scalar, size);
|
||||
int * pattern = ei_alloc_stack(int, size);
|
||||
int * tags = ei_alloc_stack(int, size);
|
||||
Scalar * y = ei_aligned_stack_alloc(Scalar, size);
|
||||
int * pattern = ei_aligned_stack_alloc(int, size);
|
||||
int * tags = ei_aligned_stack_alloc(int, size);
|
||||
|
||||
const int* P = 0;
|
||||
const int* Pinv = 0;
|
||||
@@ -315,9 +315,9 @@ bool SparseLDLT<MatrixType,Backend>::_numeric(const MatrixType& a)
|
||||
}
|
||||
}
|
||||
|
||||
ei_free_stack(y, Scalar, size);
|
||||
ei_free_stack(pattern, int, size);
|
||||
ei_free_stack(tags, int, size);
|
||||
ei_aligned_stack_free(y, Scalar, size);
|
||||
ei_aligned_stack_free(pattern, int, size);
|
||||
ei_aligned_stack_free(tags, int, size);
|
||||
|
||||
return ok; /* success, diagonal of D is all nonzero */
|
||||
}
|
||||
|
||||
@@ -190,8 +190,14 @@ bool SparseLLT<MatrixType, Backend>::solveInPlace(MatrixBase<Derived> &b) const
|
||||
ei_assert(size==b.rows());
|
||||
|
||||
m_matrix.solveTriangularInPlace(b);
|
||||
// FIXME should be .adjoint() but it fails to compile...
|
||||
m_matrix.transpose().solveTriangularInPlace(b);
|
||||
// FIXME should be simply .adjoint() but it fails to compile...
|
||||
if (NumTraits<Scalar>::IsComplex)
|
||||
{
|
||||
CholMatrixType aux = m_matrix.conjugate();
|
||||
aux.transpose().solveTriangularInPlace(b);
|
||||
}
|
||||
else
|
||||
m_matrix.transpose().solveTriangularInPlace(b);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -148,7 +148,7 @@ class SparseMatrix
|
||||
*/
|
||||
inline void startFill(int reserveSize = 1000)
|
||||
{
|
||||
std::cerr << this << " startFill\n";
|
||||
// std::cerr << this << " startFill\n";
|
||||
setZero();
|
||||
m_data.reserve(reserveSize);
|
||||
}
|
||||
@@ -214,7 +214,7 @@ class SparseMatrix
|
||||
m_data.index(id+1) = inner;
|
||||
//return (m_data.value(id+1) = 0);
|
||||
m_data.value(id+1) = 0;
|
||||
std::cerr << m_outerIndex[outer] << " " << m_outerIndex[outer+1] << "\n";
|
||||
// std::cerr << m_outerIndex[outer] << " " << m_outerIndex[outer+1] << "\n";
|
||||
return m_data.value(id+1);
|
||||
}
|
||||
|
||||
@@ -222,7 +222,7 @@ class SparseMatrix
|
||||
|
||||
inline void endFill()
|
||||
{
|
||||
std::cerr << this << " endFill\n";
|
||||
// std::cerr << this << " endFill\n";
|
||||
int size = m_data.size();
|
||||
int i = m_outerSize;
|
||||
// find the last filled column
|
||||
@@ -238,7 +238,7 @@ class SparseMatrix
|
||||
|
||||
void resize(int rows, int cols)
|
||||
{
|
||||
std::cerr << this << " resize " << rows << "x" << cols << "\n";
|
||||
// std::cerr << this << " resize " << rows << "x" << cols << "\n";
|
||||
const int outerSize = RowMajor ? rows : cols;
|
||||
m_innerSize = RowMajor ? cols : rows;
|
||||
m_data.clear();
|
||||
@@ -273,6 +273,12 @@ class SparseMatrix
|
||||
*this = other.derived();
|
||||
}
|
||||
|
||||
inline SparseMatrix(const SparseMatrix& other)
|
||||
: m_outerSize(0), m_innerSize(0), m_outerIndex(0)
|
||||
{
|
||||
*this = other.derived();
|
||||
}
|
||||
|
||||
inline void swap(SparseMatrix& other)
|
||||
{
|
||||
//EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
|
||||
@@ -409,13 +415,13 @@ class SparseMatrix<Scalar,_Flags>::InnerIterator
|
||||
m_start(m_id), m_end(m_matrix.m_outerIndex[outer+1])
|
||||
{}
|
||||
|
||||
InnerIterator& operator++() { m_id++; return *this; }
|
||||
inline InnerIterator& operator++() { m_id++; return *this; }
|
||||
|
||||
Scalar value() const { return m_matrix.m_data.value(m_id); }
|
||||
inline Scalar value() const { return m_matrix.m_data.value(m_id); }
|
||||
|
||||
int index() const { return m_matrix.m_data.index(m_id); }
|
||||
inline int index() const { return m_matrix.m_data.index(m_id); }
|
||||
|
||||
operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
|
||||
inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
|
||||
|
||||
protected:
|
||||
const SparseMatrix& m_matrix;
|
||||
|
||||
@@ -32,6 +32,7 @@ class SparseMatrixBase : public MatrixBase<Derived>
|
||||
|
||||
typedef MatrixBase<Derived> Base;
|
||||
typedef typename Base::Scalar Scalar;
|
||||
typedef typename Base::RealScalar RealScalar;
|
||||
enum {
|
||||
Flags = Base::Flags,
|
||||
RowMajor = ei_traits<Derived>::Flags&RowMajorBit ? 1 : 0
|
||||
|
||||
@@ -62,8 +62,7 @@ enum {
|
||||
|
||||
template<typename Derived> class SparseMatrixBase;
|
||||
template<typename _Scalar, int _Flags = 0> class SparseMatrix;
|
||||
template<typename _Scalar, int _Flags = 0> class HashMatrix;
|
||||
template<typename _Scalar, int _Flags = 0> class LinkedVectorMatrix;
|
||||
template<typename _Scalar, int _Flags = 0> class SparseVector;
|
||||
|
||||
const int AccessPatternNotSupported = 0x0;
|
||||
const int AccessPatternSupported = 0x1;
|
||||
|
||||
308
Eigen/src/Sparse/SparseVector.h
Normal file
308
Eigen/src/Sparse/SparseVector.h
Normal file
@@ -0,0 +1,308 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra. Eigen itself is part of the KDE project.
|
||||
//
|
||||
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
|
||||
//
|
||||
// Eigen is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
// License as published by the Free Software Foundation; either
|
||||
// version 3 of the License, or (at your option) any later version.
|
||||
//
|
||||
// Alternatively, you can redistribute it and/or
|
||||
// modify it under the terms of the GNU General Public License as
|
||||
// published by the Free Software Foundation; either version 2 of
|
||||
// the License, or (at your option) any later version.
|
||||
//
|
||||
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
|
||||
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public
|
||||
// License and a copy of the GNU General Public License along with
|
||||
// Eigen. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#ifndef EIGEN_SPARSEVECTOR_H
|
||||
#define EIGEN_SPARSEVECTOR_H
|
||||
|
||||
/** \class SparseVector
|
||||
*
|
||||
* \brief a sparse vector class
|
||||
*
|
||||
* \param _Scalar the scalar type, i.e. the type of the coefficients
|
||||
*
|
||||
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
|
||||
*
|
||||
*/
|
||||
template<typename _Scalar, int _Flags>
|
||||
struct ei_traits<SparseVector<_Scalar, _Flags> >
|
||||
{
|
||||
typedef _Scalar Scalar;
|
||||
enum {
|
||||
IsColVector = _Flags & RowMajorBit ? 0 : 1,
|
||||
|
||||
RowsAtCompileTime = IsColVector ? Dynamic : 1,
|
||||
ColsAtCompileTime = IsColVector ? 1 : Dynamic,
|
||||
MaxRowsAtCompileTime = RowsAtCompileTime,
|
||||
MaxColsAtCompileTime = ColsAtCompileTime,
|
||||
Flags = SparseBit | _Flags,
|
||||
CoeffReadCost = NumTraits<Scalar>::ReadCost,
|
||||
SupportedAccessPatterns = FullyCoherentAccessPattern
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
template<typename _Scalar, int _Flags>
|
||||
class SparseVector
|
||||
: public SparseMatrixBase<SparseVector<_Scalar, _Flags> >
|
||||
{
|
||||
public:
|
||||
EIGEN_GENERIC_PUBLIC_INTERFACE(SparseVector)
|
||||
|
||||
protected:
|
||||
public:
|
||||
|
||||
typedef SparseMatrixBase<SparseVector> SparseBase;
|
||||
enum {
|
||||
IsColVector = ei_traits<SparseVector>::IsColVector
|
||||
};
|
||||
|
||||
SparseArray<Scalar> m_data;
|
||||
int m_size;
|
||||
|
||||
|
||||
public:
|
||||
|
||||
inline int rows() const { return IsColVector ? m_size : 1; }
|
||||
inline int cols() const { return IsColVector ? 1 : m_size; }
|
||||
inline int innerSize() const { return m_size; }
|
||||
inline int outerSize() const { return 1; }
|
||||
inline int innerNonZeros(int j) const { ei_assert(j==0); return m_size; }
|
||||
|
||||
inline const Scalar* _valuePtr() const { return &m_data.value(0); }
|
||||
inline Scalar* _valuePtr() { return &m_data.value(0); }
|
||||
|
||||
inline const int* _innerIndexPtr() const { return &m_data.index(0); }
|
||||
inline int* _innerIndexPtr() { return &m_data.index(0); }
|
||||
|
||||
inline Scalar coeff(int row, int col) const
|
||||
{
|
||||
ei_assert((IsColVector ? col : row)==0);
|
||||
return coeff(IsColVector ? row : col);
|
||||
}
|
||||
inline Scalar coeff(int i) const
|
||||
{
|
||||
int start = 0;
|
||||
int end = m_data.size();
|
||||
if (start==end)
|
||||
return Scalar(0);
|
||||
else if (end>0 && i==m_data.index(end-1))
|
||||
return m_data.value(end-1);
|
||||
// ^^ optimization: let's first check if it is the last coefficient
|
||||
// (very common in high level algorithms)
|
||||
|
||||
// TODO move this search to ScalarArray
|
||||
const int* r = std::lower_bound(&m_data.index(start),&m_data.index(end-1),i);
|
||||
const int id = r-&m_data.index(0);
|
||||
return ((*r==i) && (id<end)) ? m_data.value(id) : Scalar(0);
|
||||
}
|
||||
|
||||
inline Scalar& coeffRef(int row, int col)
|
||||
{
|
||||
ei_assert((IsColVector ? col : row)==0);
|
||||
return coeff(IsColVector ? row : col);
|
||||
}
|
||||
|
||||
inline Scalar& coeffRef(int i)
|
||||
{
|
||||
int start = 0;
|
||||
int end = m_data.size();
|
||||
ei_assert(end>=start && "you probably called coeffRef on a non finalized vector");
|
||||
ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
|
||||
int* r = std::lower_bound(&m_data.index(start),&m_data.index(end),i);
|
||||
const int id = r-&m_data.index(0);
|
||||
ei_assert((*r==i) && (id<end) && "coeffRef cannot be called on a zero coefficient");
|
||||
return m_data.value(id);
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
class InnerIterator;
|
||||
|
||||
inline void setZero() { m_data.clear(); }
|
||||
|
||||
/** \returns the number of non zero coefficients */
|
||||
inline int nonZeros() const { return m_data.size(); }
|
||||
|
||||
/**
|
||||
*/
|
||||
inline void reserve(int reserveSize) { m_data.reserve(reserveSize); }
|
||||
|
||||
/**
|
||||
*/
|
||||
inline Scalar& fill(int i)
|
||||
{
|
||||
m_data.append(0, i);
|
||||
return m_data.value(m_data.size()-1);
|
||||
}
|
||||
|
||||
/** Like fill() but with random coordinates.
|
||||
*/
|
||||
inline Scalar& fillrand(int i)
|
||||
{
|
||||
int startId = 0;
|
||||
int id = m_data.size() - 1;
|
||||
m_data.resize(id+2);
|
||||
|
||||
while ( (id >= startId) && (m_data.index(id) > i) )
|
||||
{
|
||||
m_data.index(id+1) = m_data.index(id);
|
||||
m_data.value(id+1) = m_data.value(id);
|
||||
--id;
|
||||
}
|
||||
m_data.index(id+1) = i;
|
||||
m_data.value(id+1) = 0;
|
||||
return m_data.value(id+1);
|
||||
}
|
||||
|
||||
void resize(int newSize)
|
||||
{
|
||||
m_size = newSize;
|
||||
m_data.clear();
|
||||
}
|
||||
|
||||
void resizeNonZeros(int size) { m_data.resize(size); }
|
||||
|
||||
inline SparseVector() : m_size(0) { resize(0, 0); }
|
||||
|
||||
inline SparseVector(int size) : m_size(0) { resize(size); }
|
||||
|
||||
template<typename OtherDerived>
|
||||
inline SparseVector(const MatrixBase<OtherDerived>& other)
|
||||
: m_size(0)
|
||||
{
|
||||
*this = other.derived();
|
||||
}
|
||||
|
||||
inline SparseVector(const SparseVector& other)
|
||||
: m_size(0)
|
||||
{
|
||||
*this = other.derived();
|
||||
}
|
||||
|
||||
inline void swap(SparseVector& other)
|
||||
{
|
||||
std::swap(m_size, other.m_size);
|
||||
m_data.swap(other.m_data);
|
||||
}
|
||||
|
||||
inline SparseVector& operator=(const SparseVector& other)
|
||||
{
|
||||
if (other.isRValue())
|
||||
{
|
||||
swap(other.const_cast_derived());
|
||||
}
|
||||
else
|
||||
{
|
||||
resize(other.size());
|
||||
m_data = other.m_data;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
// template<typename OtherDerived>
|
||||
// inline SparseVector& operator=(const MatrixBase<OtherDerived>& other)
|
||||
// {
|
||||
// const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
|
||||
// if (needToTranspose)
|
||||
// {
|
||||
// // two passes algorithm:
|
||||
// // 1 - compute the number of coeffs per dest inner vector
|
||||
// // 2 - do the actual copy/eval
|
||||
// // Since each coeff of the rhs has to be evaluated twice, let's evauluate it if needed
|
||||
// typedef typename ei_nested<OtherDerived,2>::type OtherCopy;
|
||||
// OtherCopy otherCopy(other.derived());
|
||||
// typedef typename ei_cleantype<OtherCopy>::type _OtherCopy;
|
||||
//
|
||||
// resize(other.rows(), other.cols());
|
||||
// Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
|
||||
// // pass 1
|
||||
// // FIXME the above copy could be merged with that pass
|
||||
// for (int j=0; j<otherCopy.outerSize(); ++j)
|
||||
// for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
|
||||
// ++m_outerIndex[it.index()];
|
||||
//
|
||||
// // prefix sum
|
||||
// int count = 0;
|
||||
// VectorXi positions(outerSize());
|
||||
// for (int j=0; j<outerSize(); ++j)
|
||||
// {
|
||||
// int tmp = m_outerIndex[j];
|
||||
// m_outerIndex[j] = count;
|
||||
// positions[j] = count;
|
||||
// count += tmp;
|
||||
// }
|
||||
// m_outerIndex[outerSize()] = count;
|
||||
// // alloc
|
||||
// m_data.resize(count);
|
||||
// // pass 2
|
||||
// for (int j=0; j<otherCopy.outerSize(); ++j)
|
||||
// for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
|
||||
// {
|
||||
// int pos = positions[it.index()]++;
|
||||
// m_data.index(pos) = j;
|
||||
// m_data.value(pos) = it.value();
|
||||
// }
|
||||
//
|
||||
// return *this;
|
||||
// }
|
||||
// else
|
||||
// {
|
||||
// // there is no special optimization
|
||||
// return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
|
||||
// }
|
||||
// }
|
||||
|
||||
friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
|
||||
{
|
||||
for (unsigned int i=0; i<m.nonZeros(); ++i)
|
||||
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
|
||||
s << std::endl;
|
||||
return s;
|
||||
}
|
||||
|
||||
/** Destructor */
|
||||
inline ~SparseVector() {}
|
||||
};
|
||||
|
||||
template<typename Scalar, int _Flags>
|
||||
class SparseVector<Scalar,_Flags>::InnerIterator
|
||||
{
|
||||
public:
|
||||
InnerIterator(const SparseVector& vec, int outer=0)
|
||||
: m_vector(vec), m_id(0), m_end(vec.nonZeros())
|
||||
{
|
||||
ei_assert(outer==0);
|
||||
}
|
||||
|
||||
template<unsigned int Added, unsigned int Removed>
|
||||
InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, int outer)
|
||||
: m_vector(vec._expression()), m_id(0), m_end(m_vector.nonZeros())
|
||||
{}
|
||||
|
||||
inline InnerIterator& operator++() { m_id++; return *this; }
|
||||
|
||||
inline Scalar value() const { return m_vector.m_data.value(m_id); }
|
||||
|
||||
inline int index() const { return m_vector.m_data.index(m_id); }
|
||||
|
||||
inline operator bool() const { return (m_id < m_end); }
|
||||
|
||||
protected:
|
||||
const SparseVector& m_vector;
|
||||
int m_id;
|
||||
const int m_end;
|
||||
};
|
||||
|
||||
#endif // EIGEN_SPARSEVECTOR_H
|
||||
@@ -1,6 +1,6 @@
|
||||
PROJECT(BTL)
|
||||
|
||||
CMAKE_MINIMUM_REQUIRED(VERSION 2.4)
|
||||
CMAKE_MINIMUM_REQUIRED(VERSION 2.6.2)
|
||||
|
||||
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
|
||||
include(MacroOptionalAddSubdirectory)
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
# - Try to find eigen2 headers
|
||||
# - Try to find Eigen2 lib
|
||||
# Once done this will define
|
||||
#
|
||||
# EIGEN2_FOUND - system has eigen2 lib
|
||||
# EIGEN2_INCLUDE_DIR - the eigen2 include directory
|
||||
#
|
||||
# Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
|
||||
# Adapted from FindEigen.cmake:
|
||||
# EIGEN2_FOUND - system has eigen lib
|
||||
# EIGEN2_INCLUDE_DIR - the eigen include directory
|
||||
|
||||
# Copyright (c) 2006, 2007 Montel Laurent, <montel@kde.org>
|
||||
# Redistribution and use is allowed according to the terms of the BSD license.
|
||||
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
|
||||
@@ -19,12 +17,14 @@ else (EIGEN2_INCLUDE_DIR)
|
||||
|
||||
find_path(EIGEN2_INCLUDE_DIR NAMES Eigen/Core
|
||||
PATHS
|
||||
${Eigen_SOURCE_DIR}/
|
||||
${INCLUDE_INSTALL_DIR}
|
||||
${KDE4_INCLUDE_DIR}
|
||||
PATH_SUFFIXES eigen2
|
||||
)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(Eigen2 DEFAULT_MSG EIGEN2_INCLUDE_DIR)
|
||||
find_package_handle_standard_args(Eigen2 DEFAULT_MSG EIGEN2_INCLUDE_DIR )
|
||||
|
||||
|
||||
mark_as_advanced(EIGEN2_INCLUDE_DIR)
|
||||
|
||||
|
||||
@@ -38,11 +38,11 @@ public :
|
||||
typedef typename f77_interface_base<real>::gene_vector gene_vector;
|
||||
|
||||
static void free_matrix(gene_matrix & A, int N){
|
||||
ei_aligned_free(A);
|
||||
ei_aligned_free(A, 0);
|
||||
}
|
||||
|
||||
static void free_vector(gene_vector & B){
|
||||
ei_aligned_free(B);
|
||||
ei_aligned_free(B, 0);
|
||||
}
|
||||
|
||||
static inline void matrix_from_stl(gene_matrix & A, stl_matrix & A_stl){
|
||||
|
||||
@@ -159,6 +159,7 @@ ei_add_test(dynalloc)
|
||||
ei_add_test(nomalloc)
|
||||
ei_add_test(mixingtypes)
|
||||
ei_add_test(packetmath)
|
||||
ei_add_test(unalignedassert)
|
||||
ei_add_test(basicstuff)
|
||||
ei_add_test(linearstructure)
|
||||
ei_add_test(cwiseop)
|
||||
|
||||
@@ -38,8 +38,8 @@ template<typename MatrixType> void determinant(const MatrixType& m)
|
||||
m2.setRandom();
|
||||
typedef typename MatrixType::Scalar Scalar;
|
||||
Scalar x = ei_random<Scalar>();
|
||||
VERIFY(ei_isApprox(MatrixType::Identity(size, size).determinant(), Scalar(1)));
|
||||
VERIFY(ei_isApprox((m1*m2).determinant(), m1.determinant() * m2.determinant()));
|
||||
VERIFY_IS_APPROX(MatrixType::Identity(size, size).determinant(), Scalar(1));
|
||||
VERIFY_IS_APPROX((m1*m2).determinant(), m1.determinant() * m2.determinant());
|
||||
if(size==1) return;
|
||||
int i = ei_random<int>(0, size-1);
|
||||
int j;
|
||||
@@ -48,18 +48,18 @@ template<typename MatrixType> void determinant(const MatrixType& m)
|
||||
} while(j==i);
|
||||
m2 = m1;
|
||||
m2.row(i).swap(m2.row(j));
|
||||
VERIFY(ei_isApprox(m2.determinant(), -m1.determinant()));
|
||||
VERIFY_IS_APPROX(m2.determinant(), -m1.determinant());
|
||||
m2 = m1;
|
||||
m2.col(i).swap(m2.col(j));
|
||||
VERIFY(ei_isApprox(m2.determinant(), -m1.determinant()));
|
||||
VERIFY(ei_isApprox(m2.determinant(), m2.transpose().determinant()));
|
||||
VERIFY(ei_isApprox(ei_conj(m2.determinant()), m2.adjoint().determinant()));
|
||||
VERIFY_IS_APPROX(m2.determinant(), -m1.determinant());
|
||||
VERIFY_IS_APPROX(m2.determinant(), m2.transpose().determinant());
|
||||
VERIFY_IS_APPROX(ei_conj(m2.determinant()), m2.adjoint().determinant());
|
||||
m2 = m1;
|
||||
m2.row(i) += x*m2.row(j);
|
||||
VERIFY(ei_isApprox(m2.determinant(), m1.determinant()));
|
||||
VERIFY_IS_APPROX(m2.determinant(), m1.determinant());
|
||||
m2 = m1;
|
||||
m2.row(i) *= x;
|
||||
VERIFY(ei_isApprox(m2.determinant(), m1.determinant() * x));
|
||||
VERIFY_IS_APPROX(m2.determinant(), m1.determinant() * x);
|
||||
}
|
||||
|
||||
void test_determinant()
|
||||
@@ -72,4 +72,5 @@ void test_determinant()
|
||||
CALL_SUBTEST( determinant(Matrix<std::complex<double>, 10, 10>()) );
|
||||
CALL_SUBTEST( determinant(MatrixXd(20, 20)) );
|
||||
}
|
||||
CALL_SUBTEST( determinant(MatrixXd(200, 200)) );
|
||||
}
|
||||
|
||||
@@ -45,8 +45,8 @@ template<typename VectorType> void map_class(const VectorType& m)
|
||||
VERIFY_IS_APPROX(ma1, ma2);
|
||||
VERIFY_IS_APPROX(ma1, ma3);
|
||||
|
||||
ei_aligned_free(array1);
|
||||
ei_aligned_free(array2);
|
||||
ei_aligned_free(array1, size);
|
||||
ei_aligned_free(array2, size);
|
||||
delete[] array3;
|
||||
}
|
||||
|
||||
@@ -71,8 +71,8 @@ template<typename VectorType> void map_static_methods(const VectorType& m)
|
||||
VERIFY_IS_APPROX(ma1, ma2);
|
||||
VERIFY_IS_APPROX(ma1, ma3);
|
||||
|
||||
ei_aligned_free(array1);
|
||||
ei_aligned_free(array2);
|
||||
ei_aligned_free(array1, size);
|
||||
ei_aligned_free(array2, size);
|
||||
delete[] array3;
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
// Eigen. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#define EIGEN_NO_STATIC_ASSERT // turn static asserts into runtime asserts in order to check them
|
||||
#define EIGEN_DONT_VECTORIZE // SSE intrinsics aren't designed to allow mixing types
|
||||
#include "main.h"
|
||||
|
||||
|
||||
|
||||
@@ -25,8 +25,11 @@
|
||||
|
||||
// this hack is needed to make this file compiles with -pedantic (gcc)
|
||||
#define throw(X)
|
||||
// discard vectorization since operator new is not called in that case
|
||||
// discard vectorization since the global operator new is not called in that case
|
||||
#define EIGEN_DONT_VECTORIZE 1
|
||||
// discard stack allocation as that too bypasses the global operator new
|
||||
#define EIGEN_STACK_ALLOCATION_LIMIT 0
|
||||
|
||||
#include "main.h"
|
||||
|
||||
void* operator new[] (size_t n)
|
||||
@@ -84,4 +87,5 @@ void test_nomalloc()
|
||||
VERIFY_RAISES_ASSERT(MatrixXd dummy = MatrixXd::Random(3,3));
|
||||
CALL_SUBTEST( nomalloc(Matrix<float, 1, 1>()) );
|
||||
CALL_SUBTEST( nomalloc(Matrix4d()) );
|
||||
CALL_SUBTEST( nomalloc(Matrix<float,32,32>()) );
|
||||
}
|
||||
|
||||
@@ -23,14 +23,14 @@
|
||||
// Eigen. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#include "main.h"
|
||||
#include <Eigen/Regression>
|
||||
#include <Eigen/LeastSquares>
|
||||
|
||||
template<typename VectorType,
|
||||
typename HyperplaneType>
|
||||
void makeNoisyCohyperplanarPoints(int numPoints,
|
||||
VectorType **points,
|
||||
HyperplaneType *hyperplane,
|
||||
typename VectorType::Scalar noiseAmplitude )
|
||||
typename VectorType::Scalar noiseAmplitude)
|
||||
{
|
||||
typedef typename VectorType::Scalar Scalar;
|
||||
const int size = points[0]->size();
|
||||
|
||||
@@ -24,6 +24,27 @@
|
||||
|
||||
#include "sparse.h"
|
||||
|
||||
template<typename Scalar> void
|
||||
initSPD(double density,
|
||||
Matrix<Scalar,Dynamic,Dynamic>& refMat,
|
||||
SparseMatrix<Scalar>& sparseMat)
|
||||
{
|
||||
Matrix<Scalar,Dynamic,Dynamic> aux(refMat.rows(),refMat.cols());
|
||||
initSparse(density,refMat,sparseMat);
|
||||
refMat = refMat * refMat.adjoint();
|
||||
for (int k=0; k<2; ++k)
|
||||
{
|
||||
initSparse(density,aux,sparseMat,ForceNonZeroDiag);
|
||||
refMat += aux * aux.adjoint();
|
||||
}
|
||||
sparseMat.startFill();
|
||||
for (int j=0 ; j<sparseMat.cols(); ++j)
|
||||
for (int i=j ; i<sparseMat.rows(); ++i)
|
||||
if (refMat(i,j)!=Scalar(0))
|
||||
sparseMat.fill(i,j) = refMat(i,j);
|
||||
sparseMat.endFill();
|
||||
}
|
||||
|
||||
template<typename Scalar> void sparse_solvers(int rows, int cols)
|
||||
{
|
||||
double density = std::max(8./(rows*cols), 0.01);
|
||||
@@ -56,7 +77,6 @@ template<typename Scalar> void sparse_solvers(int rows, int cols)
|
||||
}
|
||||
|
||||
// test LLT
|
||||
if (!NumTraits<Scalar>::IsComplex)
|
||||
{
|
||||
// TODO fix the issue with complex (see SparseLLT::solveInPlace)
|
||||
SparseMatrix<Scalar> m2(rows, cols);
|
||||
@@ -65,49 +85,54 @@ template<typename Scalar> void sparse_solvers(int rows, int cols)
|
||||
DenseVector b = DenseVector::Random(cols);
|
||||
DenseVector refX(cols), x(cols);
|
||||
|
||||
initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeLowerTriangular, &zeroCoords, &nonzeroCoords);
|
||||
refMat2 += refMat2.adjoint();
|
||||
refMat2.diagonal() *= 0.5;
|
||||
initSPD(density, refMat2, m2);
|
||||
|
||||
refMat2.llt().solve(b, &refX);
|
||||
typedef SparseMatrix<Scalar,LowerTriangular|SelfAdjoint> SparseSelfAdjointMatrix;
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix> (m2).solveInPlace(x);
|
||||
//VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: default");
|
||||
if (!NumTraits<Scalar>::IsComplex)
|
||||
{
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix> (m2).solveInPlace(x);
|
||||
VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: default");
|
||||
}
|
||||
#ifdef EIGEN_CHOLMOD_SUPPORT
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix,Cholmod>(m2).solveInPlace(x);
|
||||
VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: cholmod");
|
||||
#endif
|
||||
#ifdef EIGEN_TAUCS_SUPPORT
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,IncompleteFactorization).solveInPlace(x);
|
||||
VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (IncompleteFactorization)");
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,SupernodalMultifrontal).solveInPlace(x);
|
||||
VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (SupernodalMultifrontal)");
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,SupernodalLeftLooking).solveInPlace(x);
|
||||
VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (SupernodalLeftLooking)");
|
||||
#endif
|
||||
if (!NumTraits<Scalar>::IsComplex)
|
||||
{
|
||||
#ifdef EIGEN_TAUCS_SUPPORT
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,IncompleteFactorization).solveInPlace(x);
|
||||
VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (IncompleteFactorization)");
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,SupernodalMultifrontal).solveInPlace(x);
|
||||
VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (SupernodalMultifrontal)");
|
||||
x = b;
|
||||
SparseLLT<SparseSelfAdjointMatrix,Taucs>(m2,SupernodalLeftLooking).solveInPlace(x);
|
||||
VERIFY(refX.isApprox(x,test_precision<Scalar>()) && "LLT: taucs (SupernodalLeftLooking)");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
// test LDLT
|
||||
if (!NumTraits<Scalar>::IsComplex)
|
||||
{
|
||||
// TODO fix the issue with complex (see SparseLDT::solveInPlace)
|
||||
// TODO fix the issue with complex (see SparseLDLT::solveInPlace)
|
||||
SparseMatrix<Scalar> m2(rows, cols);
|
||||
DenseMatrix refMat2(rows, cols);
|
||||
|
||||
DenseVector b = DenseVector::Random(cols);
|
||||
DenseVector refX(cols), x(cols);
|
||||
|
||||
initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, &zeroCoords, &nonzeroCoords);
|
||||
//initSPD(density, refMat2, m2);
|
||||
initSparse<Scalar>(density, refMat2, m2, ForceNonZeroDiag|MakeUpperTriangular, 0, 0);
|
||||
refMat2 += refMat2.adjoint();
|
||||
refMat2.diagonal() *= 0.5;
|
||||
|
||||
refMat2.ldlt().solve(b, &refX);
|
||||
typedef SparseMatrix<Scalar,LowerTriangular|SelfAdjoint> SparseSelfAdjointMatrix;
|
||||
typedef SparseMatrix<Scalar,UpperTriangular|SelfAdjoint> SparseSelfAdjointMatrix;
|
||||
x = b;
|
||||
SparseLDLT<SparseSelfAdjointMatrix> ldlt(m2);
|
||||
if (ldlt.succeeded())
|
||||
@@ -177,6 +202,6 @@ void test_sparse_solvers()
|
||||
for(int i = 0; i < g_repeat; i++) {
|
||||
CALL_SUBTEST( sparse_solvers<double>(8, 8) );
|
||||
CALL_SUBTEST( sparse_solvers<std::complex<double> >(16, 16) );
|
||||
CALL_SUBTEST( sparse_solvers<double>(33, 33) );
|
||||
CALL_SUBTEST( sparse_solvers<double>(101, 101) );
|
||||
}
|
||||
}
|
||||
|
||||
112
test/unalignedassert.cpp
Normal file
112
test/unalignedassert.cpp
Normal file
@@ -0,0 +1,112 @@
|
||||
// This file is part of Eigen, a lightweight C++ template library
|
||||
// for linear algebra. Eigen itself is part of the KDE project.
|
||||
//
|
||||
// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>
|
||||
//
|
||||
// Eigen is free software; you can redistribute it and/or
|
||||
// modify it under the terms of the GNU Lesser General Public
|
||||
// License as published by the Free Software Foundation; either
|
||||
// version 3 of the License, or (at your option) any later version.
|
||||
//
|
||||
// Alternatively, you can redistribute it and/or
|
||||
// modify it under the terms of the GNU General Public License as
|
||||
// published by the Free Software Foundation; either version 2 of
|
||||
// the License, or (at your option) any later version.
|
||||
//
|
||||
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
|
||||
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public
|
||||
// License and a copy of the GNU General Public License along with
|
||||
// Eigen. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#include "main.h"
|
||||
|
||||
// Fixture types for the unaligned-array assertion test.
// "GoodN" types must be heap-allocatable with plain `new` without tripping
// Eigen's alignment assertion; "BadN" types embed a fixed-size matrix whose
// size is a multiple of 16 bytes, so Eigen requires 16-byte alignment that
// a naive (non-overloaded) `operator new` does not guarantee.
struct Good1
{
  MatrixXd m; // good: m will allocate its own array, taking care of alignment.
  Good1() : m(20,20) {}
};

struct Good2
{
  Matrix3d m; // good: m's size isn't a multiple of 16 bytes, so m doesn't have to be aligned
};

struct Good3
{
  Vector2f m; // good: same reason
};

struct Bad4
{
  Vector2d m; // bad: sizeof(m)%16==0 so alignment is required
};

struct Bad5
{
  Matrix<float, 2, 6> m; // bad: same reason
};

struct Bad6
{
  Matrix<double, 3, 4> m; // bad: same reason
};

struct Good7 : Eigen::WithAlignedOperatorNew // good: inheriting the aligned operator new fixes heap allocation
{
  Vector2d m;
  float f; // make the struct have sizeof%16!=0 to make it a little more tricky when we allow an array of 2 such objects
};

struct Good8 : Eigen::WithAlignedOperatorNew
{
  float f; // try the f at first -- the EIGEN_ALIGN_128 attribute of m should make that still work
  Matrix4f m;
};

struct Good9
{
  Matrix<float,2,2,Matrix_DontAlign> m; // good: no alignment requested
  float f;
};
|
||||
|
||||
// Heap-allocate a T both as a single object and as an array of two; the test
// passes if Eigen's unaligned-array assertion does NOT fire (T is expected to
// either need no special alignment or provide an aligned operator new).
template<typename T>
void check_unalignedassert_good()
{
  T *single = new T;
  delete single;
  T *pair = new T[2];
  delete[] pair;
}
|
||||
|
||||
// Construct a T at a deliberately NOT-16-byte-aligned address via placement
// new. For the "Bad" fixture types this must trigger Eigen's unaligned-array
// assertion (the caller checks that with VERIFY_RAISES_ASSERT).
template<typename T>
void check_unalignedassert_bad()
{
  // over-sized raw storage so we can step past any 16-byte boundary
  float storage[sizeof(T)+16];
  float *misaligned = storage;
  // advance (in 4-byte float steps) until the address is no longer 16-byte aligned
  while((reinterpret_cast<size_t>(misaligned)&0xf)==0)
    ++misaligned;
  T *object = new(static_cast<void*>(misaligned)) T;
  object->~T();
}
|
||||
|
||||
// Exercise every fixture: allocating the "Good" types must succeed silently,
// while placement-constructing the "Bad" types at an unaligned address must
// raise Eigen's alignment assertion (caught by VERIFY_RAISES_ASSERT).
void unalignedassert()
{
  check_unalignedassert_good<Good1>();
  check_unalignedassert_good<Good2>();
  check_unalignedassert_good<Good3>();
  VERIFY_RAISES_ASSERT(check_unalignedassert_bad<Bad4>());
  VERIFY_RAISES_ASSERT(check_unalignedassert_bad<Bad5>());
  VERIFY_RAISES_ASSERT(check_unalignedassert_bad<Bad6>());
  // Good7/Good8 rely on Eigen::WithAlignedOperatorNew; Good9 opts out of
  // alignment entirely via Matrix_DontAlign.
  check_unalignedassert_good<Good7>();
  check_unalignedassert_good<Good8>();
  check_unalignedassert_good<Good9>();
}
|
||||
|
||||
// Test-suite entry point (invoked by Eigen's test harness from main.h).
void test_unalignedassert()
{
  CALL_SUBTEST(unalignedassert());
}
|
||||
Reference in New Issue
Block a user