SparseMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

namespace internal {
template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _Index StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = LvalueBit
  };
};

template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  enum {
    Flags = 0
  };
};

} // end namespace internal

template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _Index> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    using Base::convert_index;
  public:
    using Base::isCompressed;
    using Base::nonZeros;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    using Base::operator+=;
    using Base::operator-=;

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    typedef Diagonal<SparseMatrix> DiagonalReturnType;
    typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum {
      Options = _Options
    };

    typedef typename Base::IndexVector IndexVector;
    typedef typename Base::ScalarVector ScalarVector;
  protected:
    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    StorageIndex* m_outerIndex;
    StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed
    Storage m_data;

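    // Storage sketch (added annotation, not part of the original sources): in
    // compressed (CCS/CRS) mode the nonzeros live in three parallel arrays.
    // For the column-major matrix
    //
    //   [ 0 3 0 ]
    //   [ 1 0 0 ]
    //   [ 2 0 4 ]
    //
    // the layout is:
    //
    //   values:       1 2 3 4
    //   innerIndices: 1 2 0 2      // row index of each stored value
    //   outerStarts:  0 2 3 4      // column j occupies [outerStarts[j], outerStarts[j+1])
    //
    // In uncompressed mode, m_innerNonZeros[j] additionally records how many
    // of the slots reserved for inner vector j are actually in use.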
  public:

    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    inline Index innerSize() const { return m_innerSize; }
    inline Index outerSize() const { return m_outerSize; }

    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
    inline Scalar* valuePtr() { return m_data.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
    inline StorageIndex* outerIndexPtr() { return m_outerIndex; }

    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
    inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }

    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }

    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
    }

    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }
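
    // Usage sketch (illustrative annotation): coeff() performs a read-only
    // binary search within one inner vector, while coeffRef() falls back to
    // insert() when the coefficient is not stored yet:
    //
    // \code
    // Eigen::SparseMatrix<double> A(100,100);
    // double v = A.coeff(3,7);   // returns 0.0; the storage is untouched
    // A.coeffRef(3,7) += 1.0;    // creates a zero entry at (3,7), then updates it
    // \endcode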

    Scalar& insert(Index row, Index col);

  public:

    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
    }

    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

    #ifdef EIGEN_PARSED_BY_DOXYGEN

    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
        typename
    #endif
        SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
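
    // Usage sketch (illustrative annotation): reserving room per inner vector
    // before random insertion avoids repeated reallocations and copies; the
    // estimate of 6 nonzeros per column is an assumption of the example:
    //
    // \code
    // Eigen::SparseMatrix<double> A(rows,cols);       // default: column-major
    // A.reserve(Eigen::VectorXi::Constant(cols, 6));
    // // ... A.insert(i,j) = v_ij; for each entry ...
    // A.makeCompressed();                             // optional
    // \endcode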
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerNonZeros to hold the new starting points.
        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }

    }
  public:

    //--- low level purely coherent filling ---

    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    inline void finalize()
    {
      if(isCompressed())
      {
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        // find the last filled inner vector
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
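
    // Filling sketch (illustrative annotation): the low-level coherent API is
    // the fastest way to fill a matrix, but both the outer and the inner
    // indices must be visited in strictly increasing order:
    //
    // \code
    // Eigen::SparseMatrix<double> A(rows,cols);   // column-major
    // A.reserve(nnz_estimate);
    // for(int j=0; j<cols; ++j)
    // {
    //   A.startVec(j);                 // open column j
    //   A.insertBack(i,j) = v_ij;      // for each row i of column j, in order
    // }
    // A.finalize();                    // seal the trailing empty columns
    // \endcode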

    //---

    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators,typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar>()); }

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());

    //---

    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }
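
    // Mode-switching sketch (illustrative annotation): random insertions leave
    // the matrix in uncompressed mode; makeCompressed() squeezes the reserved
    // holes out before, e.g., handing the matrix to a solver that expects the
    // canonical compressed form:
    //
    // \code
    // A.insert(i,j) = v_ij;    // A is now uncompressed
    // A.makeCompressed();      // back to compressed CCS/CRS storage
    // \endcode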

    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }

    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
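
    // Pruning sketch (illustrative annotation, assumes C++11 lambdas): the
    // functor form keeps an entry whenever the predicate returns true; here
    // everything with magnitude below a threshold `tol` is dropped:
    //
    // \code
    // A.prune([tol](const Index&, const Index&, const double& value)
    //         { return std::abs(value) > tol; });
    // \endcode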

    void conservativeResize(Index rows, Index cols)
    {
      // No change
      if (this->rows() == rows && this->cols() == cols) return;

      // If one dimension is zero, there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

      // Deal with the inner non zeros
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }

      // Update m_innerNonZeros in case of a decrease of the inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // Re-allocate the outer index structure if necessary
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }

    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }
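
    // Sizing sketch (illustrative annotation): resize() drops all content,
    // while conservativeResize() keeps the entries that still fit:
    //
    // \code
    // Eigen::SparseMatrix<double> A(10,10);
    // A.coeffRef(2,3) = 1.0;
    // A.conservativeResize(20,20);   // the (2,3) entry is preserved
    // A.resize(5,5);                 // A is now empty
    // \endcode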

    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
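
    // Diagonal sketch (illustrative annotation): the non-const overload is
    // writable, but only diagonal coefficients that are already stored can be
    // assigned through it:
    //
    // \code
    // Eigen::VectorXd d = A.diagonal();   // dense copy of the diagonal
    // A.diagonal() *= 2;                  // requires the written entries to exist
    // \endcode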

    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
      if (needToTranspose)
        *this = other.derived();
      else
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        internal::call_assignment_no_alias(*this, other.derived());
      }
    }

    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }

    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    inline void swap(SparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
      Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
      Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }
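
    // Identity sketch (illustrative annotation): after setIdentity() the
    // matrix holds exactly n stored ones on the diagonal, in compressed mode:
    //
    // \code
    // Eigen::SparseMatrix<double> I(n,n);
    // I.setIdentity();
    // \endcode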
    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
#endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        else
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k)
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            for (; k<m.m_outerIndex[i+1]; ++k)
              s << "(_,_) ";
          }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i)
          s << m.m_outerIndex[i] << " ";
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i)
            s << m.m_innerNonZeros[i] << " ";
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

    Scalar sum() const;

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

protected:

    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }

    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

public:
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = 0);
    }

private:
  static void check_template_parameters()
  {
    EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
    EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
  }

  struct default_prunning_func {
    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
    inline bool operator() (const Index&, const Index&, const Scalar& value) const
    {
      return !internal::isMuchSmallerThan(value, reference, epsilon);
    }
    Scalar reference;
    RealScalar epsilon;
  };
};

namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the nnz per inner-vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: collapse the duplicates
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

} // end namespace internal

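// Usage sketch for setFromTriplets() (the original Doxygen description was
// not preserved in this listing; this mirrors the canonical triplet idiom,
// with `estimation_of_entries` an assumption of the example):
//
// \code
// typedef Eigen::Triplet<double> T;
// std::vector<T> tripletList;
// tripletList.reserve(estimation_of_entries);
// // fill tripletList with (row, col, value) entries...
// SparseMatrix<double> m(rows,cols);
// m.setFromTriplets(tripletList.begin(), tripletList.end());
// // m is ready to go!
// \endcode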
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar>());
}
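
// Duplicate-handling sketch (illustrative annotation, assumes C++11 lambdas):
// the DupFunctor overload below controls how triplets with identical (i,j)
// coordinates are combined; the default overload above sums them, whereas
// this functor keeps the latest value:
//
// \code
// mat.setFromTriplets(tripletList.begin(), tripletList.end(),
//                     [](const double&, const double &b) { return b; });
// \endcode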

template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index>, DupFunctor>(begin, end, *this, dup_func);
}

template<typename Scalar, int _Options, typename _Index>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_Index>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start   = count;
    Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we have already met this entry => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

template<typename Scalar, int _Options, typename _Index>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two passes algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1
    // FIXME the above copy could be merged with that pass
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      Index tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}

template<typename _Scalar, int _Options, typename _Index>
typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner-vectors to the end of the pre-allocated space
      // and allocate the entire free-space to the first inner-vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner-vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner-vectors to end of the used-space
    // and allocate the entire free-space to the current inner-vector.
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner-vector is packed to the end
  // and the current inner-vector's end matches the used-space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = 0);
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible with random un-compressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}

template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}

template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (size_t(m_outerIndex[outer+1]) == m_data.size());

  size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
  size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data; to reduce repeated reallocations
      // we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integer overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor reallocs when the matrix is almost filled
      //   2) avoid allocating too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops wrong guess.
      // let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vec
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift data of last vecs:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}

namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H