MOAB  4.9.3pre
SparseBlock.h
Go to the documentation of this file.
00001 // This file is part of Eigen, a lightweight C++ template library
00002 // for linear algebra.
00003 //
00004 // Copyright (C) 2008-2014 Gael Guennebaud <[email protected]>
00005 //
00006 // This Source Code Form is subject to the terms of the Mozilla
00007 // Public License v. 2.0. If a copy of the MPL was not distributed
00008 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
00009 
00010 #ifndef EIGEN_SPARSE_BLOCK_H
00011 #define EIGEN_SPARSE_BLOCK_H
00012 
00013 namespace Eigen { 
00014 
00015 // Subset of columns or rows
00016 template<typename XprType, int BlockRows, int BlockCols>
00017 class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
00018   : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
00019 {
00020     typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
00021     typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
00022 public:
00023     enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
00024 protected:
00025     enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
00026     typedef SparseMatrixBase<BlockType> Base;
00027     using Base::convert_index;
00028 public:
00029     EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
00030 
00031     inline BlockImpl(XprType& xpr, Index i)
00032       : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
00033     {}
00034 
00035     inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
00036       : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
00037     {}
00038 
00039     EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
00040     EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
00041     
00042     Index nonZeros() const
00043     {
00044       typedef internal::evaluator<XprType> EvaluatorType;
00045       EvaluatorType matEval(m_matrix);
00046       Index nnz = 0;
00047       Index end = m_outerStart + m_outerSize.value();
00048       for(Index j=m_outerStart; j<end; ++j)
00049         for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
00050           ++nnz;
00051       return nnz;
00052     }
00053     
00054     inline const Scalar coeff(Index row, Index col) const
00055     {
00056       return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
00057     }
00058     
00059     inline const Scalar coeff(Index index) const
00060     {
00061       return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);
00062     }
00063     
00064     inline const XprType& nestedExpression() const { return m_matrix; }
00065     inline XprType& nestedExpression() { return m_matrix; }
00066     Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
00067     Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
00068     Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
00069     Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
00070 
00071   protected:
00072 
00073     typename internal::ref_selector<XprType>::non_const_type m_matrix;
00074     Index m_outerStart;
00075     const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
00076   
00077   protected:
00078     // Disable assignment with clear error message.
00079     // Note that simply removing operator= yields compilation errors with ICC+MSVC
00080     template<typename T>
00081     BlockImpl& operator=(const T&)
00082     {
00083       EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
00084       return *this;
00085     }
00086 };
00087 
00088 
00089 /***************************************************************************
00090 * specialization for SparseMatrix
00091 ***************************************************************************/
00092 
00093 namespace internal {
00094 
// Specialized inner-panel block implementation for SparseMatrix: because the
// underlying storage is compressed row/column storage, the block can expose
// the parent matrix's own value/index arrays directly, and it supports
// assignment (rewriting the corresponding slice of the parent's storage).
template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
    using Base::convert_index;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
    typedef typename Base::IndexVector IndexVector;
    // Compile-time number of outer vectors spanned by the block.
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:

    // Block made of the single inner vector at index i.
    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    // General block spanning whole inner vectors; only outer start/size matter.
    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    // Assign a sparse expression to this block, splicing its non-zeros into
    // the parent matrix's compressed storage in place.
    template<typename OtherDerived>
    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
      _NestedMatrixType& matrix = m_matrix;
      // This assignment is slow if this vector set is not empty
      // and/or it is not at the end of the nonzeros of the underlying matrix.

      // 1 - eval to a temporary to avoid transposition and/or aliasing issues
      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());

      // 2 - let's check whether there is enough allocated memory
      Index nnz           = tmp.nonZeros();
      Index start         = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
      Index end           = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
      Index block_size    = end - start;                                                // available room in the current block
      Index tail_size     = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
      
      // Room available for the new content without a manual reallocation;
      // in uncompressed mode the block must fit within its current slot.
      Index free_size     = m_matrix.isCompressed()
                          ? Index(matrix.data().allocatedSize()) + block_size
                          : block_size;

      bool update_trailing_pointers = false;
      if(nnz>free_size) 
      {
        // realloc manually to reduce copies
        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);

        // head: everything before the block, i.e. [0, start)
        internal::smart_copy(m_matrix.valuePtr(),       m_matrix.valuePtr() + start,      newdata.valuePtr());
        internal::smart_copy(m_matrix.innerIndexPtr(),  m_matrix.innerIndexPtr() + start, newdata.indexPtr());

        // the block's new content: [start, start+nnz)
        internal::smart_copy(tmp.valuePtr(),      tmp.valuePtr() + nnz,       newdata.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr(), tmp.innerIndexPtr() + nnz,  newdata.indexPtr() + start);

        // tail: everything after the old block, shifted by (nnz - block_size)
        internal::smart_copy(matrix.valuePtr()+end,       matrix.valuePtr()+end + tail_size,      newdata.valuePtr()+start+nnz);
        internal::smart_copy(matrix.innerIndexPtr()+end,  matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
        
        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);

        matrix.data().swap(newdata);

        update_trailing_pointers = true;
      }
      else
      {
        if(m_matrix.isCompressed())
        {
          // no need to realloc, simply copy the tail at its respective position and insert tmp
          matrix.data().resize(start + nnz + tail_size);

          internal::smart_memmove(matrix.valuePtr()+end,      matrix.valuePtr() + end+tail_size,      matrix.valuePtr() + start+nnz);
          internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);

          update_trailing_pointers = true;
        }

        internal::smart_copy(tmp.valuePtr(),      tmp.valuePtr() + nnz,       matrix.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr(), tmp.innerIndexPtr() + nnz,  matrix.innerIndexPtr() + start);
      }

      // update outer index pointers and innerNonZeros
      if(IsVectorAtCompileTime)
      {
        if(!m_matrix.isCompressed())
          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
        matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
      }
      else
      {
        // Rebuild the per-vector offsets of the block from tmp's counts.
        StorageIndex p = StorageIndex(start);
        for(Index k=0; k<m_outerSize.value(); ++k)
        {
          Index nnz_k = tmp.innerVector(k).nonZeros();
          if(!m_matrix.isCompressed())
            matrix.innerNonZeroPtr()[m_outerStart+k] = StorageIndex(nnz_k);
          matrix.outerIndexPtr()[m_outerStart+k] = p;
          p += nnz_k;
        }
      }

      // The block's size changed, so every outer-index entry past the block
      // must be shifted by the difference (nnz - block_size).
      if(update_trailing_pointers)
      {
        StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
        for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
        {
          matrix.outerIndexPtr()[k] += offset;
        }
      }

      return derived();
    }

    // Required: without it the compiler-generated copy-assignment would be
    // used instead of the templated operator= above.
    inline BlockType& operator=(const BlockType& other)
    {
      return operator=<BlockType>(other);
    }

    // Direct access to the parent's arrays. valuePtr/innerIndexPtr are NOT
    // offset here because outerIndexPtr() below is shifted by m_outerStart,
    // so its entries already index into the full arrays.
    inline const Scalar* valuePtr() const
    { return m_matrix.valuePtr(); }
    inline Scalar* valuePtr()
    { return m_matrix.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const
    { return m_matrix.innerIndexPtr(); }
    inline StorageIndex* innerIndexPtr()
    { return m_matrix.innerIndexPtr(); }

    inline const StorageIndex* outerIndexPtr() const
    { return m_matrix.outerIndexPtr() + m_outerStart; }
    inline StorageIndex* outerIndexPtr()
    { return m_matrix.outerIndexPtr() + m_outerStart; }
    
    inline const StorageIndex* innerNonZeroPtr() const
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
    inline StorageIndex* innerNonZeroPtr()
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
    
    // The block is compressed if and only if the parent matrix is.
    bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
    
    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
    }
    
    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));
    }
    
    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);
    }

    // Last stored coefficient of the block; only valid for vector blocks
    // with at least one non-zero (both enforced below).
    const Scalar& lastCoeff() const
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
      eigen_assert(Base::nonZeros()>0);
      if(m_matrix.isCompressed())
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
      else
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
    }

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
    
    inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
    inline SparseMatrixType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

};
00281 
00282 } // namespace internal
00283 
00284 template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
00285 class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
00286   : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
00287 {
00288 public:
00289   typedef _StorageIndex StorageIndex;
00290   typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
00291   typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
00292   inline BlockImpl(SparseMatrixType& xpr, Index i)
00293     : Base(xpr, i)
00294   {}
00295 
00296   inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
00297     : Base(xpr, startRow, startCol, blockRows, blockCols)
00298   {}
00299   
00300   using Base::operator=;
00301 };
00302 
00303 template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
00304 class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
00305   : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
00306 {
00307 public:
00308   typedef _StorageIndex StorageIndex;
00309   typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
00310   typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
00311   inline BlockImpl(SparseMatrixType& xpr, Index i)
00312     : Base(xpr, i)
00313   {}
00314 
00315   inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
00316     : Base(xpr, startRow, startCol, blockRows, blockCols)
00317   {}
00318   
00319   using Base::operator=;
00320 private:
00321   template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
00322   template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
00323 };
00324 
00325 //----------
00326 
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if
  * \c *this is col-major (resp. row-major). */
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
{ return InnerVectorReturnType(derived(), outer); }
00333 
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if
  * \c *this is col-major (resp. row-major). Read-only version. */
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
{ return ConstInnerVectorReturnType(derived(), outer); }
00340 
00344 template<typename Derived>
00345 typename SparseMatrixBase<Derived>::InnerVectorsReturnType
00346 SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
00347 {
00348   return Block<Derived,Dynamic,Dynamic,true>(derived(),
00349                                              IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
00350                                              IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
00351   
00352 }
00353 
00357 template<typename Derived>
00358 const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
00359 SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
00360 {
00361   return Block<const Derived,Dynamic,Dynamic,true>(derived(),
00362                                                   IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
00363                                                   IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
00364   
00365 }
00366 
/** Generic implementation of sparse Block expression: an arbitrary
  * rectangular sub-matrix of a sparse expression, not restricted to whole
  * inner vectors. Read-only. */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
    
    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;

    /** Column or Row constructor: \a i is a row index when the block is a
      * single full row, a column index when it is a single full column
      * (determined by the compile-time conditions below). */
    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr),
        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
    {}

    /** Dynamic-size constructor */
    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
    {}

    inline Index rows() const { return m_blockRows.value(); }
    inline Index cols() const { return m_blockCols.value(); }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
    }

    inline Scalar& coeffRef(Index index)
    {
      return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                            m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }
    
    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return m_startRow.value(); }
    Index startCol() const { return m_startCol.value(); }
    Index blockRows() const { return m_blockRows.value(); }
    Index blockCols() const { return m_blockCols.value(); }
    
  protected:
//     friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
    friend class ReverseInnerIterator;
    friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
    
    // The exact count is unknown without a full scan; returning Dynamic
    // (negative) lets evaluators fall back to an estimate (see
    // unary_evaluator::nonZerosEstimate in this file).
    Index nonZeros() const { return Dynamic; }

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;

  protected:
    // Disable assignment with clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }

};
00455 
00456 namespace internal {
00457 
// Evaluator for generic sparse blocks. Depending on the block's shape it
// selects one of two iterator types (InnerVectorInnerIterator or
// OuterVectorInnerIterator, defined right after this struct).
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
 : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
    class InnerVectorInnerIterator;
    class OuterVectorInnerIterator;
  public:
    typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
    typedef typename XprType::StorageIndex StorageIndex;
    typedef typename XprType::Scalar Scalar;
    
    class ReverseInnerIterator;
    
    enum {
      IsRowMajor = XprType::IsRowMajor,
      
      // True when the block is a single vector lying along the outer
      // direction of the argument (one column of a row-major argument, or
      // one row of a col-major one); such blocks need the special
      // OuterVectorInnerIterator.
      OuterVector =  (BlockCols==1 && ArgType::IsRowMajor)
                    | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
                      // revert to || as soon as not needed anymore. 
                     (BlockRows==1 && !ArgType::IsRowMajor),
      
      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };
    
    typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;
    
    explicit unary_evaluator(const XprType& op)
      : m_argImpl(op.nestedExpression()), m_block(op)
    {}
    
    // A negative count (Dynamic) from the block means "unknown": fall back
    // to scaling the argument's own estimate by the fraction of the
    // argument covered by the block.
    inline Index nonZerosEstimate() const {
      Index nnz = m_block.nonZeros();
      if(nnz<0)
        return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
      return nnz;
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
    
    evaluator<ArgType> m_argImpl;
    const XprType &m_block;
};
00502 
// Iterator over one inner vector of a generic sparse block: wraps the
// argument's own inner iterator on the corresponding inner vector of the
// underlying expression, clipped to the block's inner range.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
 : public EvalIterator
{
  const XprType& m_block;
  Index m_end; // first inner index (in the argument's frame) past the block
public:
  
  EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
      m_block(aEval.m_block),
      m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
  {
    // Fast-forward past the entries that lie before the start of the block.
    while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
      EvalIterator::operator++();
  }
  
  // All positions are reported relative to the block, not the argument.
  inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
  inline Index outer()  const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
  inline Index row()    const { return EvalIterator::row()   - m_block.startRow(); }
  inline Index col()    const { return EvalIterator::col()   - m_block.startCol(); }
  
  // Also stop once the wrapped iterator passes the end of the block.
  inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};
00527 
// Iterator for the "outer vector" case: the block is a single vector lying
// along the outer direction of the argument (see the OuterVector enum).
// The argument has no direct iterator for that direction, so each step
// scans the next outer vector for an entry at the fixed inner index.
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
  const unary_evaluator& m_eval;
  Index m_outerPos;   // current outer position in the argument's frame
  Index m_innerIndex; // fixed inner index of the block's single vector
  Scalar m_value;     // value found at the current position
  Index m_end;        // first outer position past the block
public:

  EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : m_eval(aEval),
      m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) - 1), // -1 so that operator++ finds the first non-zero entry
      m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
      m_value(0),
      m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
  {
    EIGEN_UNUSED_VARIABLE(outer);
    // The block is a single vector, so there is only one outer index: 0.
    eigen_assert(outer==0);
    
    ++(*this);
  }
  
  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
  inline Index outer()  const { return 0; }
  inline Index row()    const { return IsRowMajor ? 0 : index(); }
  inline Index col()    const { return IsRowMajor ? index() : 0; }
  
  inline Scalar value() const { return m_value; }
  
  inline OuterVectorInnerIterator& operator++()
  {
    // search next non-zero entry
    while(++m_outerPos<m_end)
    {
      EvalIterator it(m_eval.m_argImpl, m_outerPos);
      // search for the key m_innerIndex in the current outer-vector
      while(it && it.index() < m_innerIndex) ++it;
      if(it && it.index()==m_innerIndex)
      {
        m_value = it.value();
        break;
      }
    }
    return *this;
  }
  
  inline operator bool() const { return m_outerPos < m_end; }
};
00577 
// Inner-panel blocks of a SparseMatrix expose genuine compressed storage
// (see internal::sparse_matrix_block_impl), so the generic
// SparseCompressedBase evaluator can be reused as-is.
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};
00586 
// Read-only variant: inner-panel blocks of a const SparseMatrix also expose
// compressed storage, so the SparseCompressedBase evaluator is reused.
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};
00595 
00596 } // end namespace internal
00597 
00598 
00599 } // end namespace Eigen
00600 
00601 #endif // EIGEN_SPARSE_BLOCK_H
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines