// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H

namespace Eigen {

namespace internal {

/* Optimized selfadjoint matrix * vector product:
 * This algorithm processes 2 columns at once, which both reduces
 * the number of loads/stores of the result by a factor of 2 and
 * reduces the instruction dependency.
 */

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version=Specialized>
struct selfadjoint_matrix_vector_product;

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
struct selfadjoint_matrix_vector_product
{
  static EIGEN_DONT_INLINE void run(
    Index size,
    const Scalar* lhs, Index lhsStride,
    const Scalar* rhs,
    Scalar* res,
    Scalar alpha);
};

template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>
EIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(
  Index size,
  const Scalar* lhs, Index lhsStride,
  const Scalar* rhs,
  Scalar* res,
  Scalar alpha)
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  const Index PacketSize = sizeof(Packet)/sizeof(Scalar);

  enum {
    IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
    IsLower = UpLo == Lower ? 1 : 0,
    FirstTriangular = IsRowMajor == IsLower
  };

  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> cj0;
  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
  conj_helper<RealScalar,Scalar,false, ConjugateRhs> cjd;

  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;
  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;

  Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;

  Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;
  if (FirstTriangular)
    bound = size - bound;

  for (Index j=FirstTriangular ? bound : 0;
       j<(FirstTriangular ? size : bound);j+=2)
  {
    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
    const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;

    Scalar t0 = cjAlpha * rhs[j];
    Packet ptmp0 = pset1<Packet>(t0);
    Scalar t1 = cjAlpha * rhs[j+1];
    Packet ptmp1 = pset1<Packet>(t1);

    Scalar t2(0);
    Packet ptmp2 = pset1<Packet>(t2);
    Scalar t3(0);
    Packet ptmp3 = pset1<Packet>(t3);

    size_t starti = FirstTriangular ? 0 : j+2;
    size_t endi   = FirstTriangular ? j : size;
    size_t alignedStart = (starti) + internal::first_default_aligned(&res[starti], endi-starti);
    size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);

    res[j]   += cjd.pmul(numext::real(A0[j]), t0);
    res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);
    if(FirstTriangular)
    {
      res[j] += cj0.pmul(A1[j], t1);
      t3 += cj1.pmul(A1[j], rhs[j]);
    }
    else
    {
      res[j+1] += cj0.pmul(A0[j+1],t0);
      t2 += cj1.pmul(A0[j+1], rhs[j+1]);
    }

    for (size_t i=starti; i<alignedStart; ++i)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }
    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up)
    // gcc 4.2 does this optimization automatically.
    const Scalar* EIGEN_RESTRICT a0It  = A0  + alignedStart;
    const Scalar* EIGEN_RESTRICT a1It  = A1  + alignedStart;
    const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
          Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
    for (size_t i=alignedStart; i<alignedEnd; i+=PacketSize)
    {
      Packet A0i = ploadu<Packet>(a0It);  a0It  += PacketSize;
      Packet A1i = ploadu<Packet>(a1It);  a1It  += PacketSize;
      Packet Bi  = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
      Packet Xi  = pload <Packet>(resIt);

      Xi    = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
      ptmp2 = pcj1.pmadd(A0i, Bi, ptmp2);
      ptmp3 = pcj1.pmadd(A1i, Bi, ptmp3);
      pstore(resIt,Xi); resIt += PacketSize;
    }
    for (size_t i=alignedEnd; i<endi; i++)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }

    res[j]   += alpha * (t2 + predux(ptmp2));
    res[j+1] += alpha * (t3 + predux(ptmp3));
  }
  for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
  {
    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;

    Scalar t1 = cjAlpha * rhs[j];
    Scalar t2(0);
    res[j] += cjd.pmul(numext::real(A0[j]), t1);
    for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
    {
      res[i] += cj0.pmul(A0[i], t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
    }
    res[j] += alpha * t2;
  }
}

} // end namespace internal

/***************************************************************************
* Wrapper to product_selfadjoint_vector
***************************************************************************/

namespace internal {

template<typename Lhs, int LhsMode, typename Rhs>
struct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,0,true>
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum { LhsUpLo = LhsMode&(Upper|Lower) };

  template<typename Dest>
  static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
  {
    typedef typename Dest::Scalar ResScalar;
    typedef typename Rhs::Scalar RhsScalar;
    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;

    eigen_assert(dest.rows()==a_lhs.rows() && dest.cols()==a_rhs.cols());

    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    enum {
      EvalToDest = (Dest::InnerStrideAtCompileTime==1),
      UseRhs = (ActualRhsTypeCleaned::InnerStrideAtCompileTime==1)
    };

    internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
    internal::gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!UseRhs> static_rhs;

    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
                                                  EvalToDest ? dest.data() : static_dest.data());

    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
                                                  UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());

    if(!EvalToDest)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      Index size = dest.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      MappedDest(actualDestPtr, dest.size()) = dest;
    }

    if(!UseRhs)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      Index size = rhs.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
    }

    internal::selfadjoint_matrix_vector_product<Scalar, Index, (internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor,
                                                int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run
      (
        lhs.rows(),                             // size
        &lhs.coeffRef(0,0), lhs.outerStride(),  // lhs info
        actualRhsPtr,                           // rhs info
        actualDestPtr,                          // result info
        actualAlpha                             // scale factor
      );

    if(!EvalToDest)
      dest = MappedDest(actualDestPtr, dest.size());
  }
};

template<typename Lhs, typename Rhs, int RhsMode>
struct selfadjoint_product_impl<Lhs,0,true,Rhs,RhsMode,false>
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  enum { RhsUpLo = RhsMode&(Upper|Lower) };

  template<typename Dest>
  static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)
  {
    // let's simply transpose the product
    Transpose<Dest> destT(dest);
    selfadjoint_product_impl<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
                             Transpose<const Lhs>, 0, true>::run(destT, a_rhs.transpose(), a_lhs.transpose(), alpha);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H
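For context, user code never calls the kernel above directly; it is reached through Eigen's public `selfadjointView()` API, with the two `selfadjoint_product_impl` specializations handling the selfadjoint operand on the left and on the right of the product. The following is a minimal usage sketch under that assumption; the matrix and vector names are purely illustrative:

```cpp
#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;

  MatrixXd A = MatrixXd::Random(5,5);
  A = (A + A.transpose()).eval();   // symmetrize so both triangles describe the same matrix
  VectorXd v = VectorXd::Random(5);

  // Selfadjoint operand on the left: only the Lower triangle of A is read,
  // matching the first selfadjoint_product_impl specialization above.
  VectorXd y1 = A.selfadjointView<Lower>() * v;

  // Selfadjoint operand on the right: matches the second specialization,
  // which simply transposes the product and swaps Upper/Lower.
  RowVectorXd y2 = v.transpose() * A.selfadjointView<Upper>();

  // Both should agree with the plain (full-matrix) products up to rounding.
  std::cout << (y1 - A*v).norm() << "\n";
  std::cout << (y2 - v.transpose()*A).norm() << "\n";
  return 0;
}
```

Only one triangle of `A` is ever accessed in each call, which is what allows the kernel to process two columns at a time over the stored triangle while the other half is obtained implicitly by (conjugate) symmetry.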