Eigen::SparseMatrix< _Scalar, _Options, _Index > Class Template Reference

A versatile sparse matrix representation.
#include <SparseMatrix.h>
Classes
    struct default_prunning_func
    class SingletonVector
Public Types
    enum { Options = _Options }
    typedef MappedSparseMatrix< Scalar, Flags > Map
    typedef Diagonal< SparseMatrix > DiagonalReturnType
    typedef Diagonal< const SparseMatrix > ConstDiagonalReturnType
    typedef Base::InnerIterator InnerIterator
    typedef Base::ReverseInnerIterator ReverseInnerIterator
    typedef internal::CompressedStorage< Scalar, StorageIndex > Storage
    typedef Base::IndexVector IndexVector
    typedef Base::ScalarVector ScalarVector
Public Member Functions
    Index rows () const
    Index cols () const
    Index innerSize () const
    Index outerSize () const
    const Scalar * valuePtr () const
    Scalar * valuePtr ()
    const StorageIndex * innerIndexPtr () const
    StorageIndex * innerIndexPtr ()
    const StorageIndex * outerIndexPtr () const
    StorageIndex * outerIndexPtr ()
    const StorageIndex * innerNonZeroPtr () const
    StorageIndex * innerNonZeroPtr ()
    Storage & data ()
    const Storage & data () const
    Scalar coeff (Index row, Index col) const
    Scalar & coeffRef (Index row, Index col)
    Scalar & insert (Index row, Index col)
    void setZero ()
    void reserve (Index reserveSize)
    template<class SizesType >
    void reserve (const SizesType &reserveSizes, const typename SizesType::value_type &enableif=typename SizesType::value_type())
    Scalar & insertBack (Index row, Index col)
    Scalar & insertBackByOuterInner (Index outer, Index inner)
    Scalar & insertBackByOuterInnerUnordered (Index outer, Index inner)
    void startVec (Index outer)
    void finalize ()
    template<typename InputIterators >
    void setFromTriplets (const InputIterators &begin, const InputIterators &end)
    template<typename InputIterators , typename DupFunctor >
    void setFromTriplets (const InputIterators &begin, const InputIterators &end, DupFunctor dup_func)
    void sumupDuplicates ()
    template<typename DupFunctor >
    void collapseDuplicates (DupFunctor dup_func=DupFunctor())
    Scalar & insertByOuterInner (Index j, Index i)
    void makeCompressed ()
    void uncompress ()
    void prune (const Scalar &reference, const RealScalar &epsilon=NumTraits< RealScalar >::dummy_precision())
    template<typename KeepFunc >
    void prune (const KeepFunc &keep=KeepFunc())
    void conservativeResize (Index rows, Index cols)
    void resize (Index rows, Index cols)
    void resizeNonZeros (Index size)
    const ConstDiagonalReturnType diagonal () const
    DiagonalReturnType diagonal ()
    SparseMatrix ()
    SparseMatrix (Index rows, Index cols)
    template<typename OtherDerived >
    SparseMatrix (const SparseMatrixBase< OtherDerived > &other)
    template<typename OtherDerived , unsigned int UpLo>
    SparseMatrix (const SparseSelfAdjointView< OtherDerived, UpLo > &other)
    SparseMatrix (const SparseMatrix &other)
    template<typename OtherDerived >
    SparseMatrix (const ReturnByValue< OtherDerived > &other)
        Copy constructor with in-place evaluation.
    template<typename OtherDerived >
    SparseMatrix (const DiagonalBase< OtherDerived > &other)
        Copy constructor with in-place evaluation.
    void swap (SparseMatrix &other)
    void setIdentity ()
    SparseMatrix & operator= (const SparseMatrix &other)
    template<typename OtherDerived >
    SparseMatrix & operator= (const EigenBase< OtherDerived > &other)
    template<typename OtherDerived >
    EIGEN_DONT_INLINE SparseMatrix & operator= (const SparseMatrixBase< OtherDerived > &other)
    ~SparseMatrix ()
    Scalar sum () const
    EIGEN_STRONG_INLINE Scalar & insertBackUncompressed (Index row, Index col)
Protected Types
    typedef SparseMatrix< Scalar,(Flags &~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix
Protected Member Functions
    template<class SizesType >
    void reserveInnerVectors (const SizesType &reserveSizes)
    template<typename Other >
    void initAssignment (const Other &other)
    EIGEN_DONT_INLINE Scalar & insertCompressed (Index row, Index col)
    EIGEN_DONT_INLINE Scalar & insertUncompressed (Index row, Index col)
Protected Attributes
    Index m_outerSize
    Index m_innerSize
    StorageIndex * m_outerIndex
    StorageIndex * m_innerNonZeros
    Storage m_data
Private Types
    typedef SparseCompressedBase< SparseMatrix > Base
Static Private Member Functions
    static void check_template_parameters ()
Friends
    std::ostream & operator<< (std::ostream &s, const SparseMatrix &m)
A versatile sparse matrix representation.
This class implements a more versatile variant of the common compressed row/column storage format. Each column's (resp. row's) non-zeros are stored as pairs of value and associated row (resp. column) index. All the non-zeros are stored in a single large buffer. Unlike the plain compressed format, there may be extra space between the non-zeros of two successive columns (resp. rows), so that insertion of a new non-zero element can be done with limited memory reallocation and copying.
A call to the function makeCompressed() turns the matrix into the standard compressed format, which is compatible with many libraries.
More details on this storage scheme are given in the manual pages.
Template Parameters
    _Scalar   the scalar type, i.e. the type of the coefficients
    _Options  a union of bit flags controlling the storage scheme. Currently the only possibilities are ColMajor or RowMajor. The default is 0, which means column-major.
    _Index    the type of the indices. It has to be a signed type (e.g., short, int, std::ptrdiff_t). Default is int.
This class can be extended with the help of the plugin mechanism described on the page TopicCustomizingEigen by defining the preprocessor symbol EIGEN_SPARSEMATRIX_PLUGIN.
Definition at line 92 of file SparseMatrix.h.
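For illustration, here is a minimal sketch (assuming Eigen 3.x headers and the default column-major layout; the tiny matrix and its entries are arbitrary) that fills a matrix, compresses it, and prints the three arrays of the compressed format:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3,3); // column-major: outer = column, inner = row
  A.insert(0,0) = 1.0;
  A.insert(2,1) = 2.0;
  A.makeCompressed();                 // pack into the standard compressed format

  // valuePtr()/innerIndexPtr() hold one entry per stored non-zero
  for(int k = 0; k < A.nonZeros(); ++k)
    std::cout << A.valuePtr()[k] << " at inner index " << A.innerIndexPtr()[k] << "\n";

  // outerIndexPtr() holds outerSize()+1 offsets delimiting each column
  for(int j = 0; j <= A.outerSize(); ++j)
    std::cout << A.outerIndexPtr()[j] << " ";
  std::cout << "\n";
}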
typedef SparseCompressedBase<SparseMatrix> Eigen::SparseMatrix< _Scalar, _Options, _Index >::Base [private]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 95 of file SparseMatrix.h.
typedef Diagonal<const SparseMatrix> Eigen::SparseMatrix< _Scalar, _Options, _Index >::ConstDiagonalReturnType
Definition at line 106 of file SparseMatrix.h.
typedef Diagonal<SparseMatrix> Eigen::SparseMatrix< _Scalar, _Options, _Index >::DiagonalReturnType
Definition at line 105 of file SparseMatrix.h.
typedef Base::IndexVector Eigen::SparseMatrix< _Scalar, _Options, _Index >::IndexVector
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 117 of file SparseMatrix.h.
typedef Base::InnerIterator Eigen::SparseMatrix< _Scalar, _Options, _Index >::InnerIterator
Definition at line 107 of file SparseMatrix.h.
typedef MappedSparseMatrix<Scalar,Flags> Eigen::SparseMatrix< _Scalar, _Options, _Index >::Map
Definition at line 104 of file SparseMatrix.h.
typedef Base::ReverseInnerIterator Eigen::SparseMatrix< _Scalar, _Options, _Index >::ReverseInnerIterator
Definition at line 108 of file SparseMatrix.h.
typedef Base::ScalarVector Eigen::SparseMatrix< _Scalar, _Options, _Index >::ScalarVector
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 118 of file SparseMatrix.h.
typedef internal::CompressedStorage<Scalar,StorageIndex> Eigen::SparseMatrix< _Scalar, _Options, _Index >::Storage
Definition at line 112 of file SparseMatrix.h.
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> Eigen::SparseMatrix< _Scalar, _Options, _Index >::TransposedSparseMatrix [protected]
Definition at line 120 of file SparseMatrix.h.
anonymous enum
Eigen::SparseMatrix< _Scalar, _Options, _Index >::SparseMatrix ( ) [inline]
Default constructor yielding an empty 0x0 matrix.
Definition at line 654 of file SparseMatrix.h.
: m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); resize(0, 0); }
Eigen::SparseMatrix< _Scalar, _Options, _Index >::SparseMatrix ( Index rows, Index cols ) [inline]
Constructs a rows x cols empty matrix.
Definition at line 662 of file SparseMatrix.h.
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); resize(rows, cols); }
Eigen::SparseMatrix< _Scalar, _Options, _Index >::SparseMatrix ( const SparseMatrixBase< OtherDerived > & other ) [inline]
Constructs a sparse matrix from the sparse expression other
Definition at line 671 of file SparseMatrix.h.
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
                      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
  check_template_parameters();
  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
    *this = other.derived();
  else
  {
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
    internal::call_assignment_no_alias(*this, other.derived());
  }
}
Eigen::SparseMatrix< _Scalar, _Options, _Index >::SparseMatrix ( const SparseSelfAdjointView< OtherDerived, UpLo > & other ) [inline]
Constructs a sparse matrix from the sparse selfadjoint view other
Definition at line 691 of file SparseMatrix.h.
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); Base::operator=(other); }
Eigen::SparseMatrix< _Scalar, _Options, _Index >::SparseMatrix ( const SparseMatrix< _Scalar, _Options, _Index > & other ) [inline]
Copy constructor (it performs a deep copy)
Definition at line 699 of file SparseMatrix.h.
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); *this = other.derived(); }
Eigen::SparseMatrix< _Scalar, _Options, _Index >::SparseMatrix ( const ReturnByValue< OtherDerived > & other ) [inline]
Copy constructor with in-place evaluation.
Definition at line 708 of file SparseMatrix.h.
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); initAssignment(other); other.evalTo(*this); }
Eigen::SparseMatrix< _Scalar, _Options, _Index >::SparseMatrix ( const DiagonalBase< OtherDerived > & other ) [inline, explicit]
Copy constructor with in-place evaluation.
Definition at line 718 of file SparseMatrix.h.
: Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { check_template_parameters(); *this = other.derived(); }
Eigen::SparseMatrix< _Scalar, _Options, _Index >::~SparseMatrix ( ) [inline]
Destructor
Definition at line 821 of file SparseMatrix.h.
{ std::free(m_outerIndex); std::free(m_innerNonZeros); }
static void Eigen::SparseMatrix< _Scalar, _Options, _Index >::check_template_parameters ( ) [inline, static, private]
Definition at line 887 of file SparseMatrix.h.
{ EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); }
Scalar Eigen::SparseMatrix< _Scalar, _Options, _Index >::coeff ( Index row, Index col ) const [inline]
Definition at line 183 of file SparseMatrix.h.
{ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols()); const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner)); }
Scalar& Eigen::SparseMatrix< _Scalar, _Options, _Index >::coeffRef ( Index row, Index col ) [inline]
If the element does not exist, it is inserted via the insert(Index,Index) function, which itself turns the matrix into uncompressed mode if it was not already.
This is an O(log(nnz_j)) operation (binary search), plus the cost of the insert(Index,Index) function if the element does not already exist.
Definition at line 201 of file SparseMatrix.h.
{ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols()); const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; Index start = m_outerIndex[outer]; Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); if(end<=start) return insert(row,col); const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner)); if((p<end) && (m_data.index(p)==inner)) return m_data.value(p); else return insert(row,col); }
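For instance, a minimal sketch (assuming Eigen 3.x) contrasting the read-only coeff(Index,Index) with coeffRef(Index,Index):

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(4,4);
  std::cout << A.coeff(1,2) << "\n";  // 0: reading an absent element inserts nothing
  std::cout << A.nonZeros() << "\n";  // 0
  A.coeffRef(1,2) += 3.0;             // inserts (1,2) as zero first, then accumulates
  std::cout << A.nonZeros() << "\n";  // 1
}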
void Eigen::SparseMatrix< Scalar, _Options, _Index >::collapseDuplicates ( DupFunctor dup_func = DupFunctor() )
Definition at line 1004 of file SparseMatrix.h.
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we already meet this entry => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;
  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}
Index Eigen::SparseMatrix< _Scalar, _Options, _Index >::cols ( void ) const [inline]
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 133 of file SparseMatrix.h.
{ return IsRowMajor ? m_innerSize : m_outerSize; }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::conservativeResize ( Index rows, Index cols ) [inline]
Resizes the matrix to a rows x cols matrix leaving old values untouched.
If the sizes of the matrix are decreased, then the matrix is turned into uncompressed mode and the storage of the out-of-bounds coefficients is kept and reserved. Call makeCompressed() to pack the entries and squeeze extra memory.
Definition at line 548 of file SparseMatrix.h.
{
  // No change
  if (this->rows() == rows && this->cols() == cols) return;

  // If one dimension is null, then there is nothing to be preserved
  if(rows==0 || cols==0) return resize(rows,cols);

  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

  // Deals with inner non zeros
  if (m_innerNonZeros)
  {
    // Resize m_innerNonZeros
    StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
    if (!newInnerNonZeros) internal::throw_std_bad_alloc();
    m_innerNonZeros = newInnerNonZeros;

    for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
      m_innerNonZeros[i] = 0;
  }
  else if (innerChange < 0)
  {
    // Inner size decreased: allocate a new m_innerNonZeros
    m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
    if (!m_innerNonZeros) internal::throw_std_bad_alloc();
    for(Index i = 0; i < m_outerSize; i++)
      m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
  }

  // Change the m_innerNonZeros in case of a decrease of inner size
  if (m_innerNonZeros && innerChange < 0)
  {
    for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
    {
      StorageIndex &n = m_innerNonZeros[i];
      StorageIndex start = m_outerIndex[i];
      while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
    }
  }

  m_innerSize = newInnerSize;

  // Re-allocate outer index structure if necessary
  if (outerChange == 0)
    return;

  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
  if (!newOuterIndex) internal::throw_std_bad_alloc();
  m_outerIndex = newOuterIndex;
  if (outerChange > 0)
  {
    StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
    for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
      m_outerIndex[i] = last;
  }
  m_outerSize += outerChange;
}
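A minimal sketch (assuming Eigen 3.x) contrasting conservativeResize(Index,Index) with resize(Index,Index); the out-of-bounds entry is dropped by the former, everything by the latter:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,0) = 1.0;
  A.insert(2,2) = 2.0;

  A.conservativeResize(2,2);         // (2,2) no longer fits and is dropped; (0,0) survives
  std::cout << A.nonZeros() << "\n"; // 1

  A.resize(2,2);                     // discards all remaining entries
  std::cout << A.nonZeros() << "\n"; // 0
}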
Storage& Eigen::SparseMatrix< _Scalar, _Options, _Index >::data ( ) [inline]
Definition at line 177 of file SparseMatrix.h.
{ return m_data; }
const Storage& Eigen::SparseMatrix< _Scalar, _Options, _Index >::data ( ) const [inline]
Definition at line 179 of file SparseMatrix.h.
{ return m_data; }
const ConstDiagonalReturnType Eigen::SparseMatrix< _Scalar, _Options, _Index >::diagonal ( ) const [inline]
Definition at line 645 of file SparseMatrix.h.
{ return ConstDiagonalReturnType(*this); }
DiagonalReturnType Eigen::SparseMatrix< _Scalar, _Options, _Index >::diagonal ( ) [inline]
Definition at line 651 of file SparseMatrix.h.
{ return DiagonalReturnType(*this); }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::finalize ( ) [inline]
Must be called after inserting a set of non-zero entries using the low-level compressed API.
Definition at line 417 of file SparseMatrix.h.
{
  if(isCompressed())
  {
    StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
    Index i = m_outerSize;
    // find the last filled column
    while (i>=0 && m_outerIndex[i]==0)
      --i;
    ++i;
    while (i<=m_outerSize)
    {
      m_outerIndex[i] = size;
      ++i;
    }
  }
}
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::initAssignment ( const Other & other ) [inline, protected]
Definition at line 837 of file SparseMatrix.h.
{ resize(other.rows(), other.cols()); if(m_innerNonZeros) { std::free(m_innerNonZeros); m_innerNonZeros = 0; } }
const StorageIndex* Eigen::SparseMatrix< _Scalar, _Options, _Index >::innerIndexPtr ( ) const [inline]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 152 of file SparseMatrix.h.
StorageIndex* Eigen::SparseMatrix< _Scalar, _Options, _Index >::innerIndexPtr ( ) [inline]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 156 of file SparseMatrix.h.
const StorageIndex* Eigen::SparseMatrix< _Scalar, _Options, _Index >::innerNonZeroPtr ( ) const [inline]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 170 of file SparseMatrix.h.
{ return m_innerNonZeros; }
StorageIndex* Eigen::SparseMatrix< _Scalar, _Options, _Index >::innerNonZeroPtr ( ) [inline]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 174 of file SparseMatrix.h.
{ return m_innerNonZeros; }
Index Eigen::SparseMatrix< _Scalar, _Options, _Index >::innerSize ( ) const [inline]
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 136 of file SparseMatrix.h.
{ return m_innerSize; }
SparseMatrix< _Scalar, _Options, _Index >::Scalar & Eigen::SparseMatrix< _Scalar, _Options, _Index >::insert ( Index row, Index col )
If the matrix *this is in compressed mode, then *this is turned into uncompressed mode while reserving room for 2 x this->innerSize() non-zeros if reserve(Index) has not been called earlier. In this case, the insertion procedure is optimized for a sequential insertion mode where elements are assumed to be inserted by increasing outer-indices.
If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1) if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
Definition at line 1116 of file SparseMatrix.h.
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner-vectors to the end of the pre-allocated space
      // and allocate the entire free-space to the first inner-vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner-vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner-vectors to end of the used-space
    // and allocate the entire free-space to the current inner-vector.
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      // -> move remaining inner-vectors back to the end of the free-space
      //    so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner-vector is packed to the end
  // and the current inner-vector end match the used-space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      // -> move remaining inner-vectors back to the end of the free-space
      //    so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = 0);
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible to random un-compressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}
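A sketch of the reserve/insert filling pattern described above (the per-column estimate of 3 non-zeros and the band pattern are arbitrary choices for the example):

#include <Eigen/Sparse>
#include <algorithm>

int main()
{
  const int n = 100;
  Eigen::SparseMatrix<double> A(n,n);
  A.reserve(Eigen::VectorXi::Constant(n, 3)); // ~3 non-zeros expected per column

  for(int j = 0; j < n; ++j)                    // increasing outer (column) index
    for(int i = j; i < std::min(j + 3, n); ++i) // increasing inner (row) index
      A.insert(i,j) = 1.0 / (i + j + 1);

  A.makeCompressed(); // pack into the standard compressed format
  return 0;
}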
Scalar& Eigen::SparseMatrix< _Scalar, _Options, _Index >::insertBack ( Index row, Index col ) [inline]
Before filling a given inner vector you must call the startVec(Index) function.
After an insertion session, you should call the finalize() function.
Definition at line 378 of file SparseMatrix.h.
{ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row); }
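A sketch of a complete low-level filling session (assuming a column-major matrix, so outer = column and inner = row; the diagonal pattern is arbitrary):

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.reserve(3);                // compressed-mode preallocation

  for(int j = 0; j < 3; ++j)   // inner vectors must be started in increasing order
  {
    A.startVec(j);
    A.insertBack(j,j) = 1.0;   // inner indices must increase within each vector
  }
  A.finalize();                // mandatory after a low-level insertion session
  return 0;
}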
Scalar& Eigen::SparseMatrix< _Scalar, _Options, _Index >::insertBackByOuterInner ( Index outer, Index inner ) [inline]
Definition at line 385 of file SparseMatrix.h.
{ eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)"); Index p = m_outerIndex[outer+1]; ++m_outerIndex[outer+1]; m_data.append(Scalar(0), inner); return m_data.value(p); }
Scalar& Eigen::SparseMatrix< _Scalar, _Options, _Index >::insertBackByOuterInnerUnordered ( Index outer, Index inner ) [inline]
Definition at line 397 of file SparseMatrix.h.
{ Index p = m_outerIndex[outer+1]; ++m_outerIndex[outer+1]; m_data.append(Scalar(0), inner); return m_data.value(p); }
EIGEN_STRONG_INLINE Scalar& Eigen::SparseMatrix< _Scalar, _Options, _Index >::insertBackUncompressed ( Index row, Index col ) [inline]
Definition at line 873 of file SparseMatrix.h.
{ const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? col : row; eigen_assert(!isCompressed()); eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; m_data.index(p) = convert_index(inner); return (m_data.value(p) = 0); }
Scalar& Eigen::SparseMatrix< _Scalar, _Options, _Index >::insertByOuterInner ( Index j, Index i ) [inline]
Same as insert(Index,Index), except that the indices are given relative to the storage order.
Definition at line 452 of file SparseMatrix.h.
{ return insert(IsRowMajor ? j : i, IsRowMajor ? i : j); }
EIGEN_DONT_INLINE SparseMatrix< _Scalar, _Options, _Index >::Scalar & Eigen::SparseMatrix< _Scalar, _Options, _Index >::insertCompressed ( Index row, Index col ) [protected]
Definition at line 1268 of file SparseMatrix.h.
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (size_t(m_outerIndex[outer+1]) == m_data.size());

  size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
  size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data, to reduce multiple reallocations
      // we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integers overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor realloc when the matrix is almost filled
      //   2) avoid to allocate too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops wrong guess.
      // let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vec
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift data of last vecs:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}
EIGEN_DONT_INLINE SparseMatrix< _Scalar, _Options, _Index >::Scalar & Eigen::SparseMatrix< _Scalar, _Options, _Index >::insertUncompressed ( Index row, Index col ) [protected]
Definition at line 1236 of file SparseMatrix.h.
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::makeCompressed ( ) [inline]
Turns the matrix into the compressed format.
Definition at line 459 of file SparseMatrix.h.
{ if(isCompressed()) return; eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0); Index oldStart = m_outerIndex[1]; m_outerIndex[1] = m_innerNonZeros[0]; for(Index j=1; j<m_outerSize; ++j) { Index nextOldStart = m_outerIndex[j+1]; Index offset = oldStart - m_outerIndex[j]; if(offset>0) { for(Index k=0; k<m_innerNonZeros[j]; ++k) { m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k); m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k); } } m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j]; oldStart = nextOldStart; } std::free(m_innerNonZeros); m_innerNonZeros = 0; m_data.resize(m_outerIndex[m_outerSize]); m_data.squeeze(); }
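For example, a minimal sketch (assuming Eigen 3.x) showing the compressed/uncompressed transitions around insert(Index,Index) and makeCompressed():

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(5,5);
  std::cout << A.isCompressed() << "\n"; // 1: a fresh matrix starts in compressed mode
  A.insert(2,3) = 4.0;
  std::cout << A.isCompressed() << "\n"; // 0: insert() switches to uncompressed mode
  A.makeCompressed();
  std::cout << A.isCompressed() << "\n"; // 1
}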
SparseMatrix& Eigen::SparseMatrix< _Scalar, _Options, _Index >::operator= ( const SparseMatrix< _Scalar, _Options, _Index > & other ) [inline]
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 749 of file SparseMatrix.h.
{
  if (other.isRValue())
  {
    swap(other.const_cast_derived());
  }
  else if(this!=&other)
  {
#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif
    initAssignment(other);
    if(other.isCompressed())
    {
      internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
      m_data = other.m_data;
    }
    else
    {
      Base::operator=(other);
    }
  }
  return *this;
}
SparseMatrix& Eigen::SparseMatrix< _Scalar, _Options, _Index >::operator= ( const EigenBase< OtherDerived > & other ) [inline]
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 776 of file SparseMatrix.h.
{ return Base::operator=(other.derived()); }
EIGEN_DONT_INLINE SparseMatrix< Scalar, _Options, _Index > & Eigen::SparseMatrix< Scalar, _Options, _Index >::operator= ( const SparseMatrixBase< OtherDerived > & other )
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 1044 of file SparseMatrix.h.
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
                      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

#ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
#endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
#ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
#endif
    // two passes algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1
    // FIXME the above copy could be merged with that pass
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      Index tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);

    // pass 2
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}
const StorageIndex* Eigen::SparseMatrix< _Scalar, _Options, _Index >::outerIndexPtr ( ) const [inline]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 161 of file SparseMatrix.h.
{ return m_outerIndex; }
StorageIndex* Eigen::SparseMatrix< _Scalar, _Options, _Index >::outerIndexPtr ( ) [inline]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 165 of file SparseMatrix.h.
{ return m_outerIndex; }
Index Eigen::SparseMatrix< _Scalar, _Options, _Index >::outerSize ( ) const [inline]
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 138 of file SparseMatrix.h.
{ return m_outerSize; }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::prune ( const Scalar & reference, const RealScalar & epsilon = NumTraits<RealScalar>::dummy_precision() ) [inline]
Suppresses all non-zeros which are much smaller than reference under the tolerance epsilon.
Definition at line 502 of file SparseMatrix.h.
{ prune(default_prunning_func(reference,epsilon)); }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::prune ( const KeepFunc & keep = KeepFunc() ) [inline]
Turns the matrix into compressed format, and suppresses all non-zeros which do not satisfy the predicate keep. The functor type KeepFunc must implement the following function:
bool operator() (const Index& row, const Index& col, const Scalar& value) const;
Definition at line 515 of file SparseMatrix.h.
{
  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
  makeCompressed();

  StorageIndex k = 0;
  for(Index j=0; j<m_outerSize; ++j)
  {
    Index previousStart = m_outerIndex[j];
    m_outerIndex[j] = k;
    Index end = m_outerIndex[j+1];
    for(Index i=previousStart; i<end; ++i)
    {
      if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
      {
        m_data.value(k) = m_data.value(i);
        m_data.index(k) = m_data.index(i);
        ++k;
      }
    }
  }
  m_outerIndex[m_outerSize] = k;
  m_data.resize(k,0);
}
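For instance, a C++11 sketch keeping only the strictly lower triangular part; the lambda matches the keep(row, col, value) call visible in the implementation above:

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(4,4);
  A.insert(0,1) = 1.0; // upper part: will be pruned
  A.insert(2,0) = 2.0; // strictly lower part: kept

  // keep(row, col, value) must return true for the entries to retain
  A.prune([](int row, int col, double /*value*/) { return row > col; });
  // A now holds only (2,0) and is in compressed mode
  return 0;
}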
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::reserve ( Index reserveSize ) [inline]
Preallocates reserveSize non-zeros.
Precondition: the matrix must be in compressed mode.
Definition at line 257 of file SparseMatrix.h.
{ eigen_assert(isCompressed() && "This function does not make sense in non compressed mode."); m_data.reserve(reserveSize); }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::reserve ( const SizesType & reserveSizes, const typename SizesType::value_type & enableif = typename SizesType::value_type() ) [inline]
Definition at line 280 of file SparseMatrix.h.
{ EIGEN_UNUSED_VARIABLE(enableif); reserveInnerVectors(reserveSizes); }
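A sketch of this per-inner-vector overload (assuming Eigen 3.x, where dense vectors such as VectorXi expose the required value_type; the sizes are arbitrary):

#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(10,4); // column-major: 4 inner vectors
  Eigen::VectorXi sizes(4);
  sizes << 1, 5, 2, 8;                 // expected non-zeros per column
  A.reserve(sizes);                    // reserves room inside each inner vector
  return 0;
}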
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::reserveInnerVectors ( const SizesType & reserveSizes ) [inline, protected]
Definition at line 292 of file SparseMatrix.h.
{
  if(isCompressed())
  {
    Index totalReserveSize = 0;
    // turn the matrix into non-compressed mode
    m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
    if (!m_innerNonZeros) internal::throw_std_bad_alloc();

    // temporarily use m_innerSizes to hold the new starting points.
    StorageIndex* newOuterIndex = m_innerNonZeros;

    StorageIndex count = 0;
    for(Index j=0; j<m_outerSize; ++j)
    {
      newOuterIndex[j] = count;
      count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
      totalReserveSize += reserveSizes[j];
    }
    m_data.reserve(totalReserveSize);
    StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
    for(Index j=m_outerSize-1; j>=0; --j)
    {
      StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
      for(Index i=innerNNZ-1; i>=0; --i)
      {
        m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
        m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
      }
      previousOuterIndex = m_outerIndex[j];
      m_outerIndex[j] = newOuterIndex[j];
      m_innerNonZeros[j] = innerNNZ;
    }
    m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

    m_data.resize(m_outerIndex[m_outerSize]);
  }
  else
  {
    StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
    if (!newOuterIndex) internal::throw_std_bad_alloc();

    StorageIndex count = 0;
    for(Index j=0; j<m_outerSize; ++j)
    {
      newOuterIndex[j] = count;
      StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
      StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
      count += toReserve + m_innerNonZeros[j];
    }
    newOuterIndex[m_outerSize] = count;

    m_data.resize(count);
    for(Index j=m_outerSize-1; j>=0; --j)
    {
      Index offset = newOuterIndex[j] - m_outerIndex[j];
      if(offset>0)
      {
        StorageIndex innerNNZ = m_innerNonZeros[j];
        for(Index i=innerNNZ-1; i>=0; --i)
        {
          m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
          m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
        }
      }
    }

    std::swap(m_outerIndex, newOuterIndex);
    std::free(newOuterIndex);
  }
}
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::resize ( Index rows, Index cols ) [inline]
Resizes the matrix to a rows x cols matrix and initializes it to zero.
This function does not free the currently allocated memory. To release as much memory as possible, call
mat.data().squeeze();
after resizing it.
Definition at line 616 of file SparseMatrix.h.
{ const Index outerSize = IsRowMajor ? rows : cols; m_innerSize = IsRowMajor ? cols : rows; m_data.clear(); if (m_outerSize != outerSize || m_outerSize==0) { std::free(m_outerIndex); m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex))); if (!m_outerIndex) internal::throw_std_bad_alloc(); m_outerSize = outerSize; } if(m_innerNonZeros) { std::free(m_innerNonZeros); m_innerNonZeros = 0; } memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::resizeNonZeros ( Index size ) [inline]
Resizes the non-zero vector to size.
Definition at line 639 of file SparseMatrix.h.
Index Eigen::SparseMatrix< _Scalar, _Options, _Index >::rows ( void ) const [inline]
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 131 of file SparseMatrix.h.
{ return IsRowMajor ? m_outerSize : m_innerSize; }
void Eigen::SparseMatrix< Scalar, _Options, _Index >::setFromTriplets ( const InputIterators & begin, const InputIterators & end )
Fills the matrix *this with the list of triplets defined by the iterator range begin - end.
A triplet is a tuple (i,j,value) defining a non-zero element. The input list of triplets does not have to be sorted, and may contain duplicate elements. In any case, the result is a sorted and compressed sparse matrix where the duplicates have been summed up. This is an O(n) operation, with n the number of triplet elements. The initial contents of *this are destroyed. The matrix *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor, or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
The InputIterators value_type must provide the following interface:
Scalar value() const; // the value
Scalar row() const;   // the row index i
Scalar col() const;   // the column index j
See for instance the Eigen::Triplet template class.
Here is a typical usage example:
typedef Triplet<double> T;
std::vector<T> tripletList;
tripletList.reserve(estimation_of_entries);
for(...)
{
  // ...
  tripletList.push_back(T(i,j,v_ij));
}
SparseMatrixType m(rows,cols);
m.setFromTriplets(tripletList.begin(), tripletList.end());
// m is ready to go!
Definition at line 980 of file SparseMatrix.h.
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar>());
}
void Eigen::SparseMatrix< Scalar, _Options, _Index >::setFromTriplets ( const InputIterators & begin, const InputIterators & end, DupFunctor dup_func )
The same as setFromTriplets but when duplicates are met the functor dup_func is applied:
value = dup_func(OldValue, NewValue)
Here is a C++11 example keeping the latest entry only:
mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
Definition at line 996 of file SparseMatrix.h.
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index>, DupFunctor>(begin, end, *this, dup_func);
}
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::setIdentity ( ) [inline]
Sets *this to the identity matrix. This function also turns the matrix into compressed mode, and drops any reserved memory.
Definition at line 739 of file SparseMatrix.h.
{ eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); this->m_data.resize(rows()); Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1)); Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes(); Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows())); std::free(m_innerNonZeros); m_innerNonZeros = 0; }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::setZero ( ) [inline]
Removes all non-zeros but keeps the allocated memory.
This function does not free the currently allocated memory. To release as much memory as possible, call
mat.data().squeeze();
after resizing it.
Definition at line 246 of file SparseMatrix.h.
{ m_data.clear(); memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); if(m_innerNonZeros) memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex)); }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::startVec ( Index outer ) [inline]
Definition at line 407 of file SparseMatrix.h.
{ eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially"); eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially"); m_outerIndex[outer+1] = m_outerIndex[outer]; }
internal::traits< SparseMatrix< _Scalar, _Options, _Index > >::Scalar Eigen::SparseMatrix< _Scalar, _Options, _Index >::sum ( ) const
Overloaded for performance
Reimplemented from Eigen::SparseMatrixBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 30 of file SparseRedux.h.
{ eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum(); }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::sumupDuplicates ( ) [inline]
Definition at line 443 of file SparseMatrix.h.
{ collapseDuplicates(internal::scalar_sum_op<Scalar>()); }
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::swap ( SparseMatrix< _Scalar, _Options, _Index > & other ) [inline]
Swaps the content of two sparse matrices of the same type. This is a fast operation that simply swaps the underlying pointers and parameters.
Definition at line 727 of file SparseMatrix.h.
{
  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
  std::swap(m_outerIndex, other.m_outerIndex);
  std::swap(m_innerSize, other.m_innerSize);
  std::swap(m_outerSize, other.m_outerSize);
  std::swap(m_innerNonZeros, other.m_innerNonZeros);
  m_data.swap(other.m_data);
}
void Eigen::SparseMatrix< _Scalar, _Options, _Index >::uncompress ( ) [inline]
Turns the matrix into uncompressed mode.
Definition at line 490 of file SparseMatrix.h.
{ if(m_innerNonZeros != 0) return; m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex))); for (Index i = 0; i < m_outerSize; i++) { m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; } }
const Scalar* Eigen::SparseMatrix< _Scalar, _Options, _Index >::valuePtr ( ) const [inline]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 143 of file SparseMatrix.h.
Scalar* Eigen::SparseMatrix< _Scalar, _Options, _Index >::valuePtr ( ) [inline]
Reimplemented from Eigen::SparseCompressedBase< SparseMatrix< _Scalar, _Options, _Index > >.
Definition at line 147 of file SparseMatrix.h.
std::ostream& operator<< ( std::ostream & s, const SparseMatrix< _Scalar, _Options, _Index > & m ) [friend]
Definition at line 783 of file SparseMatrix.h.
{ EIGEN_DBG_SPARSE( s << "Nonzero entries:\n"; if(m.isCompressed()) for (Index i=0; i<m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") "; else for (Index i=0; i<m.outerSize(); ++i) { Index p = m.m_outerIndex[i]; Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i]; Index k=p; for (; k<pe; ++k) s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") "; for (; k<m.m_outerIndex[i+1]; ++k) s << "(_,_) "; } s << std::endl; s << std::endl; s << "Outer pointers:\n"; for (Index i=0; i<m.outerSize(); ++i) s << m.m_outerIndex[i] << " "; s << " $" << std::endl; if(!m.isCompressed()) { s << "Inner non zeros:\n"; for (Index i=0; i<m.outerSize(); ++i) s << m.m_innerNonZeros[i] << " "; s << " $" << std::endl; } s << std::endl; ); s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m); return s; }
Storage Eigen::SparseMatrix< _Scalar, _Options, _Index >::m_data [protected]
Definition at line 126 of file SparseMatrix.h.
StorageIndex* Eigen::SparseMatrix< _Scalar, _Options, _Index >::m_innerNonZeros [protected]
Definition at line 125 of file SparseMatrix.h.
Index Eigen::SparseMatrix< _Scalar, _Options, _Index >::m_innerSize [protected]
Definition at line 123 of file SparseMatrix.h.
StorageIndex* Eigen::SparseMatrix< _Scalar, _Options, _Index >::m_outerIndex [protected]
Definition at line 124 of file SparseMatrix.h.
Index Eigen::SparseMatrix< _Scalar, _Options, _Index >::m_outerSize [protected]
Definition at line 122 of file SparseMatrix.h.