GeneralBlockPanelKernel.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
class gebp_traits;

inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
{
  return a<=0 ? b : a;
}
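
// Illustrative note (not from the original source): the helper treats a
// non-positive query result as "size unknown" and falls back to the
// compile-time default, e.g.
//   manage_caching_sizes_helper(-1,    32*1024);  // query failed  -> 32*1024
//   manage_caching_sizes_helper(49152, 32*1024);  // query worked  -> 49152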

#if EIGEN_ARCH_i386_OR_x86_64
const std::ptrdiff_t defaultL1CacheSize = 32*1024;
const std::ptrdiff_t defaultL2CacheSize = 256*1024;
const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
#else
const std::ptrdiff_t defaultL1CacheSize = 16*1024;
const std::ptrdiff_t defaultL2CacheSize = 512*1024;
const std::ptrdiff_t defaultL3CacheSize = 512*1024;
#endif

struct CacheSizes {
  CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {
    int l1CacheSize, l2CacheSize, l3CacheSize;
    queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
    m_l1 = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);
    m_l2 = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);
    m_l3 = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);
  }

  std::ptrdiff_t m_l1;
  std::ptrdiff_t m_l2;
  std::ptrdiff_t m_l3;
};

inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
{
  static CacheSizes m_cacheSizes;

  if(action==SetAction)
  {
    // set the cpu cache sizes and cache all block sizes from a global cache size in bytes
    eigen_internal_assert(l1!=0 && l2!=0);
    m_cacheSizes.m_l1 = *l1;
    m_cacheSizes.m_l2 = *l2;
    m_cacheSizes.m_l3 = *l3;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(l1!=0 && l2!=0);
    *l1 = m_cacheSizes.m_l1;
    *l2 = m_cacheSizes.m_l2;
    *l3 = m_cacheSizes.m_l3;
  }
  else
  {
    eigen_internal_assert(false);
  }
}
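
// Usage sketch (illustrative, not part of the original source): callers read
// the lazily-initialized sizes with GetAction or override them with SetAction;
// the public setCpuCacheSizes() helper referenced below builds on the
// SetAction path.
//   std::ptrdiff_t l1, l2, l3;
//   manage_caching_sizes(GetAction, &l1, &l2, &l3);  // read current sizes
//   l1 = 64*1024; l2 = 1024*1024; l3 = 8*1024*1024;
//   manage_caching_sizes(SetAction, &l1, &l2, &l3);  // force new sizes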

/* Helper for computeProductBlockingSizes.
 *
 * Given an m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
 * this function computes the blocking size parameters along the respective dimensions
 * for matrix products and related algorithms. The blocking sizes depend on various
 * parameters:
 * - the L1 and L2 cache sizes,
 * - the register level blocking sizes defined by gebp_traits,
 * - the number of scalars that fit into a packet (when vectorization is enabled).
 *
 * \sa setCpuCacheSizes */

template<typename LhsScalar, typename RhsScalar, int KcFactor>
void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  // Explanations:
  // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and
  // kc x nc blocks B' on the rhs. B' has to fit into L2/L3 cache. Moreover, A' is processed
  // per mr x kc horizontal small panels where mr is the blocking size along the m dimension
  // at the register level. This small horizontal panel has to stay within L1 cache.
  std::ptrdiff_t l1, l2, l3;
  manage_caching_sizes(GetAction, &l1, &l2, &l3);

  if (num_threads > 1) {
    typedef typename Traits::ResScalar ResScalar;
    enum {
      kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
      k_mask = -8,

      mr = Traits::mr,
      mr_mask = -mr,

      nr = Traits::nr,
      nr_mask = -nr
    };
    // Increasing k gives us more time to prefetch the content of the "C"
    // registers. However once the latency is hidden there is no point in
    // increasing the value of k, so we'll cap it at 320 (value determined
    // experimentally).
    const Index k_cache = (std::min<Index>)((l1-ksub)/kdiv, 320);
    if (k_cache < k) {
      k = k_cache & k_mask;
      eigen_internal_assert(k > 0);
    }

    const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
    const Index n_per_thread = numext::div_ceil(n, num_threads);
    if (n_cache <= n_per_thread) {
      // Don't exceed the capacity of the l2 cache.
      eigen_internal_assert(n_cache >= static_cast<Index>(nr));
      n = n_cache & nr_mask;
      eigen_internal_assert(n > 0);
    } else {
      n = (std::min<Index>)(n, (n_per_thread + nr - 1) & nr_mask);
    }

    if (l3 > l2) {
      // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
      const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
      const Index m_per_thread = numext::div_ceil(m, num_threads);
      if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
        m = m_cache & mr_mask;
        eigen_internal_assert(m > 0);
      } else {
        m = (std::min<Index>)(m, (m_per_thread + mr - 1) & mr_mask);
      }
    }
  }
  else {
    // In unit tests we do not want to use extra large matrices,
    // so we reduce the cache sizes to check that the blocking strategy is not flawed.
#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    l1 = 9*1024;
    l2 = 32*1024;
    l3 = 512*1024;
#endif

    // Early return for small problems because the computations below are time consuming for small problems.
    // Perhaps it would make more sense to consider k*n*m??
    // Note that for very tiny problems, this function should be bypassed anyway
    // because we use the coefficient-based implementation for them.
    if((std::max)(k,(std::max)(m,n))<48)
      return;

    typedef typename Traits::ResScalar ResScalar;
    enum {
      k_peeling = 8,
      k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
    };

    // ---- 1st level of blocking on L1, yields kc ----

    // Blocking on the third dimension (i.e., k) is chosen so that a horizontal panel
    // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fit within L1 cache.
    // We also include a register-level block of the result (mr x nr).
    // (In an ideal world only the lhs panel would stay in L1.)
    // Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of:
    const Index max_kc = std::max<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
    const Index old_k = k;
    if(k>max_kc)
    {
      // We are really blocking on the third dimension:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the result.
      k = (k%max_kc)==0 ? max_kc
                        : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));

      eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
    }
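
    // Worked example (illustrative, not from the original source): with
    // max_kc = 248 and k = 300 we get k % max_kc = 52 and k/max_kc + 1 = 2, so
    //   k = 248 - 8 * ((248 - 1 - 52) / (8*2)) = 248 - 8*12 = 152.
    // The number of sweeps is unchanged (300/152 == 300/248 == 1) while the
    // last block grows from 52 to 148.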

    // ---- 2nd level of blocking on max(L2,L3), yields nc ----

    // TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is:
    //      actual_l2 = max(l2, l3/nb_core_sharing_l3)
    // The number below is quite conservative: it is better to underestimate the cache size rather than to overestimate it.
    // For instance, it corresponds to 6MB of L3 shared among 4 cores.
    #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    const Index actual_l2 = l3;
    #else
    const Index actual_l2 = 1572864; // == 1.5 MB
    #endif

    // Here, nc is chosen such that a block of kc x nc of the rhs fits within half of L2.
    // The second half is implicitly reserved for accessing the result and lhs coefficients.
    // When k<max_kc, then nc can grow arbitrarily. In practice, it seems to be fruitful
    // to limit this growth: we bound nc to grow by a factor of at most 1.5.
    // However, if the entire lhs block fits within L1, then we are not going to block on the rows at all,
    // and it becomes fruitful to keep the packed rhs blocks in L1 if there is enough remaining space.
    Index max_nc;
    const Index lhs_bytes = m * k * sizeof(LhsScalar);
    const Index remaining_l1 = l1 - k_sub - lhs_bytes;
    if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
    {
      // L1 blocking
      max_nc = remaining_l1 / (k*sizeof(RhsScalar));
    }
    else
    {
      // L2 blocking
      max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
    }
    // WARNING Below, we assume that Traits::nr is a power of two.
    Index nc = std::min<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
    if(n>nc)
    {
      // We are really blocking over the columns:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the packed lhs.
      //    Here we allow one more sweep if this gives us a perfect match, thus the commented "-1".
      n = (n%nc)==0 ? nc
                    : (nc - Traits::nr * ((nc/*-1*/-(n%nc))/(Traits::nr*(n/nc+1))));
    }
    else if(old_k==k)
    {
      // So far, no blocking at all, i.e., kc==k, and nc==n.
      // In this case, let's perform a blocking over the rows such that the packed lhs data is kept in cache L1/L2.
      // TODO: part of this blocking strategy is now implemented within the kernel itself, so the L1-based heuristic here should be obsolete.
      Index problem_size = k*n*sizeof(LhsScalar);
      Index actual_lm = actual_l2;
      Index max_mc = m;
      if(problem_size<=1024)
      {
        // problem is small enough to be kept in L1
        // Let's choose m such that the lhs's block fits in 1/3 of L1
        actual_lm = l1;
      }
      else if(l3!=0 && problem_size<=32768)
      {
        // we have both L2 and L3, and the problem is small enough to be kept in L2
        // Let's choose m such that the lhs's block fits in 1/3 of L2
        actual_lm = l2;
        max_mc = (std::min<Index>)(576,max_mc);
      }
      Index mc = (std::min<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
      if (mc > Traits::mr) mc -= mc % Traits::mr;
      else if (mc==0) return;
      m = (m%mc)==0 ? mc
                    : (mc - Traits::mr * ((mc/*-1*/-(m%mc))/(Traits::mr*(m/mc+1))));
    }
  }
}

inline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
{
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
    k = std::min<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
    m = std::min<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
    n = std::min<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
    return true;
  }
#else
  EIGEN_UNUSED_VARIABLE(k)
  EIGEN_UNUSED_VARIABLE(m)
  EIGEN_UNUSED_VARIABLE(n)
#endif
  return false;
}
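
// Illustrative usage (assumption, not from the original source): the test
// macros above are meant to be supplied at compile time, e.g.
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZES=1
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K=64
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M=48
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N=32
// which caps every product's blocking sizes at (k,m,n) = (64,48,32) and
// bypasses the heuristic in computeProductBlockingSizes() below.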

template<typename LhsScalar, typename RhsScalar, int KcFactor>
void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  if (!useSpecificBlockingSizes(k, m, n)) {
    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor>(k, m, n, num_threads);
  }

  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
  enum {
    kr = 8,
    mr = Traits::mr,
    nr = Traits::nr
  };
  if (k > kr) k -= k % kr;
  if (m > mr) m -= m % mr;
  if (n > nr) n -= n % nr;
}

template<typename LhsScalar, typename RhsScalar>
inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n, num_threads);
}
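
// Call sketch (illustrative, not part of the original source): for a product
// of dimensions m x k times k x n, pass the problem sizes in and read the
// block sizes back from the same variables:
//   Index kc = depth, mc = rows, nc = cols;
//   computeProductBlockingSizes<double,double>(kc, mc, nc, /*num_threads=*/4);
//   // kc, mc, nc now hold the blocking sizes, rounded down to multiples of
//   // kr=8, mr and nr respectively (when large enough).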

#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
  #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
#else

  // FIXME (a bit overkill maybe ?)

  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
    {
      c = cj.pmadd(a,b,c);
    }
  };

  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
    {
      t = b; t = cj.pmul(a,t); c = padd(c,t);
    }
  };

  template<typename CJ, typename A, typename B, typename C, typename T>
  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }

  #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
//   #define CJMADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
#endif
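
// Expansion sketch (illustrative, not from the original source): in the
// non-FMA branch a statement such as
//   CJMADD(cj, A0, B_0, C0, T0);
// dispatches to gebp_madd(cj, A0, B_0, C0, T0), which for a uniform packet
// type expands to T0 = B_0; T0 = cj.pmul(A0, T0); C0 = padd(C0, T0);, i.e.
// the rhs operand is first copied so the multiply result lands in a
// dedicated register.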

/* Vectorization logic
 *  real*real: unpack rhs to constant packets, ...
 *
 *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
 *          storing each res packet into two packets (2x2),
 *          at the end combine them: swap the second and addsub them
 *  cf*cf : same but with 2x4 blocks
 *  cplx*real : unpack rhs to constant packets, ...
 *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
 */
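
/* Worked example for the cd*cd case above (illustrative, not from the
 * original source): with a = a_r + i*a_i and b = b_r + i*b_i, the two
 * partial packets hold
 *   c.first  = a * b_r = (a_r b_r, a_i b_r)
 *   c.second = a * b_i = (a_r b_i, a_i b_i)
 * and the final combine (see the DoublePacket acc() below) computes
 *   tmp = pcplxflip(pconj(c.second)) = (-a_i b_i, a_r b_i)
 *   c.first + tmp = (a_r b_r - a_i b_i, a_i b_r + a_r b_i) = a * b.
 */
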
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits
{
public:
  typedef _LhsScalar LhsScalar;
  typedef _RhsScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,

    // register block size along the N direction must be 1 or 4
    nr = 4,

    // register block size along the M direction (currently, this one cannot be modified)
    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    // See bug 992: if the scalar type is not vectorizable but EIGEN_HAS_SINGLE_INSTRUCTION_MADD is defined,
    // then using 3*LhsPacketSize triggers non-implemented paths in syrk.
    mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
    mr = default_mr,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     pbroadcast2(b, b0, b1);
//   }

  template<typename RhsPacketType>
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
  {
    dest = pset1<RhsPacketType>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = ploadquad<RhsPacket>(b);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = pload<LhsPacketType>(a);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = ploadu<LhsPacketType>(a);
  }

  template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
  EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
  {
    // It would be a lot cleaner to call pmadd all the time. Unfortunately if we
    // let gcc allocate the register in which to store the result of the pmul
    // (in the case where there is no FMA) gcc fails to figure out how to avoid
    // spilling registers.
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c = pmadd(a,b,c);
#else
    tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = pmadd(c,alpha,r);
  }

  template<typename ResPacketHalf>
  EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
  {
    r = pmadd(c,alpha,r);
  }

protected:
//   conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
//   conj_helper<LhsPacket,RhsPacket,ConjLhs,ConjRhs> pcj;
};

template<typename RealScalar, bool _ConjLhs>
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
{
public:
  typedef std::complex<RealScalar> LhsScalar;
  typedef RealScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = false,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    mr = 3*LhsPacketSize,
#else
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     pbroadcast2(b, b0, b1);
//   }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a.v,b,c.v);
#else
    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(c,alpha,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};

template<typename Packet>
struct DoublePacket
{
  Packet first;
  Packet second;
};
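
// Note (illustrative, not from the original source): in the complex*complex
// traits below, 'first' holds the broadcast real parts and 'second' the
// broadcast imaginary parts of an rhs coefficient (see loadRhs), so a
// DoublePacket accumulator carries two partial products that acc() later
// recombines into a complex result.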

template<typename Packet>
DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
{
  DoublePacket<Packet> res;
  res.first  = padd(a.first, b.first);
  res.second = padd(a.second,b.second);
  return res;
}

template<typename Packet>
const DoublePacket<Packet>& predux4(const DoublePacket<Packet> &a)
{
  return a;
}

template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
// template<typename Packet>
// DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
// {
//   DoublePacket<Packet> res;
//   res.first  = padd(a.first, b.first);
//   res.second = padd(a.second,b.second);
//   return res;
// }

template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef std::complex<RealScalar>  LhsScalar;
  typedef std::complex<RealScalar>  RhsScalar;
  typedef std::complex<RealScalar>  ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,

    // FIXME: should depend on NumberOfRegisters
    nr = 4,
    mr = ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<RealScalar>::type RealPacket;
  typedef typename packet_traits<Scalar>::type     ScalarPacket;
  typedef DoublePacket<RealPacket> DoublePacketType;

  typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;

  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }

  EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p)
  {
    p.first   = pset1<RealPacket>(RealScalar(0));
    p.second  = pset1<RealPacket>(RealScalar(0));
  }

  // Scalar path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
  {
    dest = pset1<ResPacket>(*b);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
  {
    dest.first  = pset1<RealPacket>(real(*b));
    dest.second = pset1<RealPacket>(imag(*b));
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
  {
    loadRhs(b,dest);
  }
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
  {
    eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
    loadRhs(b+2, b2);
    loadRhs(b+3, b3);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  // Scalar path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  // nothing special here
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
  {
    c.first   = padd(pmul(a,b.first), c.first);
    c.second  = padd(pmul(a,b.second),c.second);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
  {
    c = cj.pmadd(a,b,c);
  }

  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }

  EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
  {
    // assemble c
    ResPacket tmp;
    if((!ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(pconj(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((!ConjLhs)&&(ConjRhs))
    {
      tmp = pconj(pcplxflip(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = padd(pconj(ResPacket(c.first)),tmp);
    }
    else if((ConjLhs)&&(ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = psub(pconj(ResPacket(c.first)),tmp);
    }

    r = pmadd(tmp,alpha,r);
  }

protected:
  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};

template<typename RealScalar, bool _ConjRhs>
class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef RealScalar  LhsScalar;
  typedef Scalar      RhsScalar;
  typedef Scalar      ResScalar;

  enum {
    ConjLhs = false,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    // FIXME: should depend on NumberOfRegisters
    nr = 4,
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     // FIXME not sure that's the best way to implement it!
//     b0 = pload1<RhsPacket>(b+0);
//     b1 = pload1<RhsPacket>(b+1);
//   }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a,b.v,c.v);
#else
    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(alpha,c,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
};

// helper for the rotating kernel below
template <typename GebpKernel, bool UseRotatingKernel = GebpKernel::UseRotatingKernel>
struct PossiblyRotatingKernelHelper
{
  // default implementation, not rotating

  typedef typename GebpKernel::Traits Traits;
  typedef typename Traits::RhsScalar RhsScalar;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::AccPacket AccPacket;

  const Traits& traits;
  PossiblyRotatingKernelHelper(const Traits& t) : traits(t) {}

  template <size_t K, size_t Index>
  void loadOrRotateRhs(RhsPacket& to, const RhsScalar* from) const
  {
    traits.loadRhs(from + (Index+4*K)*Traits::RhsProgress, to);
  }

  void unrotateResult(AccPacket&,
                      AccPacket&,
                      AccPacket&,
                      AccPacket&)
  {
  }
};

// rotating implementation
template <typename GebpKernel>
struct PossiblyRotatingKernelHelper<GebpKernel, true>
{
  typedef typename GebpKernel::Traits Traits;
  typedef typename Traits::RhsScalar RhsScalar;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::AccPacket AccPacket;

  const Traits& traits;
  PossiblyRotatingKernelHelper(const Traits& t) : traits(t) {}

  template <size_t K, size_t Index>
  void loadOrRotateRhs(RhsPacket& to, const RhsScalar* from) const
  {
    if (Index == 0) {
      to = pload<RhsPacket>(from + 4*K*Traits::RhsProgress);
    } else {
      EIGEN_ASM_COMMENT("Do not reorder code, we're very tight on registers");
      to = protate<1>(to);
    }
  }

  void unrotateResult(AccPacket& res0,
                      AccPacket& res1,
                      AccPacket& res2,
                      AccPacket& res3)
  {
    PacketBlock<AccPacket> resblock;
    resblock.packet[0] = res0;
    resblock.packet[1] = res1;
    resblock.packet[2] = res2;
    resblock.packet[3] = res3;
    ptranspose(resblock);
    resblock.packet[3] = protate<1>(resblock.packet[3]);
    resblock.packet[2] = protate<2>(resblock.packet[2]);
    resblock.packet[1] = protate<3>(resblock.packet[1]);
    ptranspose(resblock);
    res0 = resblock.packet[0];
    res1 = resblock.packet[1];
    res2 = resblock.packet[2];
    res3 = resblock.packet[3];
  }
};
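
// Note (illustrative, not from the original source): on ARM this specialization
// replaces the four per-step rhs broadcasts with a single packet load followed
// by lane rotations (protate<1>), easing register pressure; unrotateResult then
// undoes the accumulated rotation pattern on the four accumulators with two
// transposes and per-row rotations.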

/* optimized GEneral packed Block * packed Panel product kernel
 *
 * Mixing type logic: C += A * B
 *  |  A  |  B  | comments
 *  |real |cplx | no vectorization yet, would require packing A with duplication
 *  |cplx |real | easy vectorization
 */
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
  typedef typename Traits::ResScalar ResScalar;
  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;
  typedef typename Traits::AccPacket AccPacket;

  typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
  typedef typename SwappedTraits::ResScalar SResScalar;
  typedef typename SwappedTraits::LhsPacket SLhsPacket;
  typedef typename SwappedTraits::RhsPacket SRhsPacket;
  typedef typename SwappedTraits::ResPacket SResPacket;
  typedef typename SwappedTraits::AccPacket SAccPacket;

  typedef typename DataMapper::LinearMapper LinearMapper;

  enum {
    Vectorizable  = Traits::Vectorizable,
    LhsProgress   = Traits::LhsProgress,
    RhsProgress   = Traits::RhsProgress,
    ResPacketSize = Traits::ResPacketSize
  };

  static const bool UseRotatingKernel =
    EIGEN_ARCH_ARM &&
    internal::is_same<LhsScalar, float>::value &&
    internal::is_same<RhsScalar, float>::value &&
    internal::is_same<ResScalar, float>::value &&
    Traits::LhsPacketSize == 4 &&
    Traits::RhsPacketSize == 4 &&
    Traits::ResPacketSize == 4;

  EIGEN_DONT_INLINE
  void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
                  Index rows, Index depth, Index cols, ResScalar alpha,
                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);
};

template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
EIGEN_DONT_INLINE
void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>
  ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,
               Index rows, Index depth, Index cols, ResScalar alpha,
               Index strideA, Index strideB, Index offsetA, Index offsetB)
  {
    Traits traits;
    SwappedTraits straits;

    if(strideA==-1) strideA = depth;
    if(strideB==-1) strideB = depth;
    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
    Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
    const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
    const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
    const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
    enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell)
    const Index peeled_kc  = depth & ~(pk-1);
    const Index prefetch_res_offset = 32/sizeof(ResScalar);
//     const Index depth2     = depth & ~1;

    //---------- Process 3 * LhsProgress rows at once ----------
    // This corresponds to 3*LhsProgress x nr register blocks.
    // Usually this makes sense only with FMA.
    if(mr>=3*Traits::LhsProgress)
    {
      PossiblyRotatingKernelHelper<gebp_kernel> possiblyRotatingKernelHelper(traits);

      // Here, the general idea is to loop on each largest micro horizontal panel of the lhs (3*Traits::LhsProgress x depth)
      // and on each largest micro vertical panel of the rhs (depth * nr).
      // The blocking sizes, i.e., 'depth', have been computed so that the micro horizontal panel of the lhs fits in L1.
      // However, if depth is too small, we can extend the number of rows of these horizontal panels.
      // This actual number of rows is computed as follows:
      const Index l1 = defaultL1CacheSize; // in bytes. TODO: l1 should be passed to this function.
      // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size
      // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess),
      // or because we are testing specific blocking sizes.
      const Index actual_panel_rows = (3*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) ));
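      // Worked example (illustrative, not from the original source): for float
      // with LhsProgress == 4, mr == 12, nr == 4, depth == 320 and l1 == 32768,
      // the numerator is 32768 - 4*12*4 - 320*4*4 = 27456 and the denominator
      // is 320*4*12 = 15360, so the ratio truncates to 1 and
      // actual_panel_rows == 3*LhsProgress == 12, i.e. no extension.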
01028       for(Index i1=0; i1<peeled_mc3; i1+=actual_panel_rows)
01029       {
01030         const Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc3);
01031         for(Index j2=0; j2<packet_cols4; j2+=nr)
01032         {
01033           for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
01034           {
01035           
01036           // We selected a 3*Traits::LhsProgress x nr micro block of res which is entirely
01037           // stored into 3 x nr registers.
01038           
01039           const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*LhsProgress)];
01040           prefetch(&blA[0]);
01041 
01042           // gets res block as register
01043           AccPacket C0, C1, C2,  C3,
01044                     C4, C5, C6,  C7,
01045                     C8, C9, C10, C11;
01046           traits.initAcc(C0);  traits.initAcc(C1);  traits.initAcc(C2);  traits.initAcc(C3);
01047           traits.initAcc(C4);  traits.initAcc(C5);  traits.initAcc(C6);  traits.initAcc(C7);
01048           traits.initAcc(C8);  traits.initAcc(C9);  traits.initAcc(C10); traits.initAcc(C11);
01049 
01050           LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
01051           LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
01052           LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
01053           LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
01054 
01055           r0.prefetch(0);
01056           r1.prefetch(0);
01057           r2.prefetch(0);
01058           r3.prefetch(0);
01059 
01060           // performs "inner" products
01061           const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
01062           prefetch(&blB[0]);
01063           LhsPacket A0, A1;
01064 
01065           for(Index k=0; k<peeled_kc; k+=pk)
01066           {
01067             EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX4");
01068             RhsPacket B_0, T0;
01069             LhsPacket A2;
01070 
01071 #define EIGEN_GEBP_ONESTEP(K) \
01072             do { \
01073               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX4"); \
01074               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
01075               internal::prefetch(blA+(3*K+16)*LhsProgress); \
01076               if (EIGEN_ARCH_ARM) internal::prefetch(blB+(4*K+16)*RhsProgress); /* Bug 953 */ \
01077               traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0);  \
01078               traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1);  \
01079               traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2);  \
01080               possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 0>(B_0, blB); \
01081               traits.madd(A0, B_0, C0, T0); \
01082               traits.madd(A1, B_0, C4, T0); \
01083               traits.madd(A2, B_0, C8, B_0); \
01084               possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 1>(B_0, blB); \
01085               traits.madd(A0, B_0, C1, T0); \
01086               traits.madd(A1, B_0, C5, T0); \
01087               traits.madd(A2, B_0, C9, B_0); \
01088               possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 2>(B_0, blB); \
01089               traits.madd(A0, B_0, C2,  T0); \
01090               traits.madd(A1, B_0, C6,  T0); \
01091               traits.madd(A2, B_0, C10, B_0); \
01092               possiblyRotatingKernelHelper.template loadOrRotateRhs<K, 3>(B_0, blB); \
01093               traits.madd(A0, B_0, C3 , T0); \
01094               traits.madd(A1, B_0, C7,  T0); \
01095               traits.madd(A2, B_0, C11, B_0); \
01096               EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \
01097             } while(false)
01098 
01099             internal::prefetch(blB);
01100             EIGEN_GEBP_ONESTEP(0);
01101             EIGEN_GEBP_ONESTEP(1);
01102             EIGEN_GEBP_ONESTEP(2);
01103             EIGEN_GEBP_ONESTEP(3);
01104             EIGEN_GEBP_ONESTEP(4);
01105             EIGEN_GEBP_ONESTEP(5);
01106             EIGEN_GEBP_ONESTEP(6);
01107             EIGEN_GEBP_ONESTEP(7);
01108 
01109             blB += pk*4*RhsProgress;
01110             blA += pk*3*Traits::LhsProgress;
01111 
01112             EIGEN_ASM_COMMENT("end gebp micro kernel 3pX4");
01113           }
01114           // process remaining peeled loop
01115           for(Index k=peeled_kc; k<depth; k++)
01116           {
01117             RhsPacket B_0, T0;
01118             LhsPacket A2;
01119             EIGEN_GEBP_ONESTEP(0);
01120             blB += 4*RhsProgress;
01121             blA += 3*Traits::LhsProgress;
01122           }
01123 
01124 #undef EIGEN_GEBP_ONESTEP
01125 
01126           possiblyRotatingKernelHelper.unrotateResult(C0, C1, C2, C3);
01127           possiblyRotatingKernelHelper.unrotateResult(C4, C5, C6, C7);
01128           possiblyRotatingKernelHelper.unrotateResult(C8, C9, C10, C11);
01129 
01130           ResPacket R0, R1, R2;
01131           ResPacket alphav = pset1<ResPacket>(alpha);
01132 
01133           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
01134           R1 = r0.loadPacket(1 * Traits::ResPacketSize);
01135           R2 = r0.loadPacket(2 * Traits::ResPacketSize);
01136           traits.acc(C0, alphav, R0);
01137           traits.acc(C4, alphav, R1);
01138           traits.acc(C8, alphav, R2);
01139           r0.storePacket(0 * Traits::ResPacketSize, R0);
01140           r0.storePacket(1 * Traits::ResPacketSize, R1);
01141           r0.storePacket(2 * Traits::ResPacketSize, R2);
01142 
01143           R0 = r1.loadPacket(0 * Traits::ResPacketSize);
01144           R1 = r1.loadPacket(1 * Traits::ResPacketSize);
01145           R2 = r1.loadPacket(2 * Traits::ResPacketSize);
01146           traits.acc(C1, alphav, R0);
01147           traits.acc(C5, alphav, R1);
01148           traits.acc(C9, alphav, R2);
01149           r1.storePacket(0 * Traits::ResPacketSize, R0);
01150           r1.storePacket(1 * Traits::ResPacketSize, R1);
01151           r1.storePacket(2 * Traits::ResPacketSize, R2);
01152 
01153           R0 = r2.loadPacket(0 * Traits::ResPacketSize);
01154           R1 = r2.loadPacket(1 * Traits::ResPacketSize);
01155           R2 = r2.loadPacket(2 * Traits::ResPacketSize);
01156           traits.acc(C2, alphav, R0);
01157           traits.acc(C6, alphav, R1);
01158           traits.acc(C10, alphav, R2);
01159           r2.storePacket(0 * Traits::ResPacketSize, R0);
01160           r2.storePacket(1 * Traits::ResPacketSize, R1);
01161           r2.storePacket(2 * Traits::ResPacketSize, R2);
01162 
01163           R0 = r3.loadPacket(0 * Traits::ResPacketSize);
01164           R1 = r3.loadPacket(1 * Traits::ResPacketSize);
01165           R2 = r3.loadPacket(2 * Traits::ResPacketSize);
01166           traits.acc(C3, alphav, R0);
01167           traits.acc(C7, alphav, R1);
01168           traits.acc(C11, alphav, R2);
01169           r3.storePacket(0 * Traits::ResPacketSize, R0);
01170           r3.storePacket(1 * Traits::ResPacketSize, R1);
01171           r3.storePacket(2 * Traits::ResPacketSize, R2);          
01172           }
01173         }
01174 
01175         // Deal with remaining columns of the rhs
01176         for(Index j2=packet_cols4; j2<cols; j2++)
01177         {
01178           for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)
01179           {
01180           // One column at a time
01181           const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];
01182           prefetch(&blA[0]);
01183 
01184           // gets res block as register
01185           AccPacket C0, C4, C8;
01186           traits.initAcc(C0);
01187           traits.initAcc(C4);
01188           traits.initAcc(C8);
01189 
01190           LinearMapper r0 = res.getLinearMapper(i, j2);
01191           r0.prefetch(0);
01192 
01193           // performs "inner" products
01194           const RhsScalar* blB = &blockB[j2*strideB+offsetB];
01195           LhsPacket A0, A1, A2;
01196           
01197           for(Index k=0; k<peeled_kc; k+=pk)
01198           {
01199             EIGEN_ASM_COMMENT("begin gebp micro kernel 3pX1");
01200             RhsPacket B_0;
01201 #define EIGEN_GEBGP_ONESTEP(K) \
01202             do { \
01203               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 3pX1"); \
01204               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
01205               traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0);  \
01206               traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1);  \
01207               traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2);  \
01208               traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);   \
01209               traits.madd(A0, B_0, C0, B_0); \
01210               traits.madd(A1, B_0, C4, B_0); \
01211               traits.madd(A2, B_0, C8, B_0); \
01212               EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX1"); \
01213             } while(false)
01214         
01215             EIGEN_GEBGP_ONESTEP(0);
01216             EIGEN_GEBGP_ONESTEP(1);
01217             EIGEN_GEBGP_ONESTEP(2);
01218             EIGEN_GEBGP_ONESTEP(3);
01219             EIGEN_GEBGP_ONESTEP(4);
01220             EIGEN_GEBGP_ONESTEP(5);
01221             EIGEN_GEBGP_ONESTEP(6);
01222             EIGEN_GEBGP_ONESTEP(7);
01223 
01224             blB += pk*RhsProgress;
01225             blA += pk*3*Traits::LhsProgress;
01226 
01227             EIGEN_ASM_COMMENT("end gebp micro kernel 3pX1");
01228           }
01229 
01230           // process remaining peeled loop
01231           for(Index k=peeled_kc; k<depth; k++)
01232           {
01233             RhsPacket B_0;
01234             EIGEN_GEBGP_ONESTEP(0);
01235             blB += RhsProgress;
01236             blA += 3*Traits::LhsProgress;
01237           }
01238 #undef EIGEN_GEBGP_ONESTEP
01239           ResPacket R0, R1, R2;
01240           ResPacket alphav = pset1<ResPacket>(alpha);
01241 
01242           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
01243           R1 = r0.loadPacket(1 * Traits::ResPacketSize);
01244           R2 = r0.loadPacket(2 * Traits::ResPacketSize);
01245           traits.acc(C0, alphav, R0);
01246           traits.acc(C4, alphav, R1);
01247           traits.acc(C8, alphav, R2);
01248           r0.storePacket(0 * Traits::ResPacketSize, R0);
01249           r0.storePacket(1 * Traits::ResPacketSize, R1);
01250           r0.storePacket(2 * Traits::ResPacketSize, R2);          
01251           }
01252         }
01253       }
01254     }
01255 
01256     //---------- Process 2 * LhsProgress rows at once ----------
01257     if(mr>=2*Traits::LhsProgress)
01258     {
01259       const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function.
01260       // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size
01261       // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. on Android, we can only guess),
01262       // or because we are testing specific blocking sizes.
01263       Index actual_panel_rows = (2*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) ));
01264 
01265       for(Index i1=peeled_mc3; i1<peeled_mc2; i1+=actual_panel_rows)
01266       {
01267         Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc2);
01268         for(Index j2=0; j2<packet_cols4; j2+=nr)
01269         {
01270           for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
01271           {
01272           
01273           // We selected a 2*Traits::LhsProgress x nr micro block of res which is entirely
01274           // stored into 2 x nr registers.
01275           
01276           const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
01277           prefetch(&blA[0]);
01278 
01279           // gets res block as register
01280           AccPacket C0, C1, C2, C3,
01281                     C4, C5, C6, C7;
01282           traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);
01283           traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7);
01284 
01285           LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
01286           LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
01287           LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
01288           LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
01289 
01290           r0.prefetch(prefetch_res_offset);
01291           r1.prefetch(prefetch_res_offset);
01292           r2.prefetch(prefetch_res_offset);
01293           r3.prefetch(prefetch_res_offset);
01294 
01295           // performs "inner" products
01296           const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
01297           prefetch(&blB[0]);
01298           LhsPacket A0, A1;
01299 
01300           for(Index k=0; k<peeled_kc; k+=pk)
01301           {
01302             EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX4");
01303             RhsPacket B_0, B1, B2, B3, T0;
01304 
01305    #define EIGEN_GEBGP_ONESTEP(K) \
01306             do {                                                                \
01307               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX4");        \
01308               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
01309               traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0);                    \
01310               traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1);                    \
01311               traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3);  \
01312               traits.madd(A0, B_0, C0, T0);                                     \
01313               traits.madd(A1, B_0, C4, B_0);                                    \
01314               traits.madd(A0, B1,  C1, T0);                                     \
01315               traits.madd(A1, B1,  C5, B1);                                     \
01316               traits.madd(A0, B2,  C2, T0);                                     \
01317               traits.madd(A1, B2,  C6, B2);                                     \
01318               traits.madd(A0, B3,  C3, T0);                                     \
01319               traits.madd(A1, B3,  C7, B3);                                     \
01320               EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX4");          \
01321             } while(false)
01322             
01323             internal::prefetch(blB+(48+0));
01324             EIGEN_GEBGP_ONESTEP(0);
01325             EIGEN_GEBGP_ONESTEP(1);
01326             EIGEN_GEBGP_ONESTEP(2);
01327             EIGEN_GEBGP_ONESTEP(3);
01328             internal::prefetch(blB+(48+16));
01329             EIGEN_GEBGP_ONESTEP(4);
01330             EIGEN_GEBGP_ONESTEP(5);
01331             EIGEN_GEBGP_ONESTEP(6);
01332             EIGEN_GEBGP_ONESTEP(7);
01333 
01334             blB += pk*4*RhsProgress;
01335             blA += pk*(2*Traits::LhsProgress);
01336 
01337             EIGEN_ASM_COMMENT("end gebp micro kernel 2pX4");
01338           }
01339           // process the remainder of the peeled k loop
01340           for(Index k=peeled_kc; k<depth; k++)
01341           {
01342             RhsPacket B_0, B1, B2, B3, T0;
01343             EIGEN_GEBGP_ONESTEP(0);
01344             blB += 4*RhsProgress;
01345             blA += 2*Traits::LhsProgress;
01346           }
01347 #undef EIGEN_GEBGP_ONESTEP
01348 
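          // Merge the accumulators into res: traits.acc(C, alphav, R) computes
          // R += alpha * C, so overall the kernel performs res += alpha * lhs * rhs.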
01349           ResPacket R0, R1, R2, R3;
01350           ResPacket alphav = pset1<ResPacket>(alpha);
01351 
01352           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
01353           R1 = r0.loadPacket(1 * Traits::ResPacketSize);
01354           R2 = r1.loadPacket(0 * Traits::ResPacketSize);
01355           R3 = r1.loadPacket(1 * Traits::ResPacketSize);
01356           traits.acc(C0, alphav, R0);
01357           traits.acc(C4, alphav, R1);
01358           traits.acc(C1, alphav, R2);
01359           traits.acc(C5, alphav, R3);
01360           r0.storePacket(0 * Traits::ResPacketSize, R0);
01361           r0.storePacket(1 * Traits::ResPacketSize, R1);
01362           r1.storePacket(0 * Traits::ResPacketSize, R2);
01363           r1.storePacket(1 * Traits::ResPacketSize, R3);
01364 
01365           R0 = r2.loadPacket(0 * Traits::ResPacketSize);
01366           R1 = r2.loadPacket(1 * Traits::ResPacketSize);
01367           R2 = r3.loadPacket(0 * Traits::ResPacketSize);
01368           R3 = r3.loadPacket(1 * Traits::ResPacketSize);
01369           traits.acc(C2,  alphav, R0);
01370           traits.acc(C6,  alphav, R1);
01371           traits.acc(C3,  alphav, R2);
01372           traits.acc(C7,  alphav, R3);
01373           r2.storePacket(0 * Traits::ResPacketSize, R0);
01374           r2.storePacket(1 * Traits::ResPacketSize, R1);
01375           r3.storePacket(0 * Traits::ResPacketSize, R2);
01376           r3.storePacket(1 * Traits::ResPacketSize, R3);
01377           }
01378         }
01379       
01380         // Deal with remaining columns of the rhs
01381         for(Index j2=packet_cols4; j2<cols; j2++)
01382         {
01383           for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)
01384           {
01385           // One column at a time
01386           const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
01387           prefetch(&blA[0]);
01388 
01389           // get the res block as registers
01390           AccPacket C0, C4;
01391           traits.initAcc(C0);
01392           traits.initAcc(C4);
01393 
01394           LinearMapper r0 = res.getLinearMapper(i, j2);
01395           r0.prefetch(prefetch_res_offset);
01396 
01397           // perform the "inner" products
01398           const RhsScalar* blB = &blockB[j2*strideB+offsetB];
01399           LhsPacket A0, A1;
01400 
01401           for(Index k=0; k<peeled_kc; k+=pk)
01402           {
01403             EIGEN_ASM_COMMENT("begin gebp micro kernel 2pX1");
01404             RhsPacket B_0, B1;
01405         
01406 #define EIGEN_GEBGP_ONESTEP(K) \
01407             do {                                                                  \
01408               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 2pX1");          \
01409               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
01410               traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0);                      \
01411               traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1);                      \
01412               traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);                       \
01413               traits.madd(A0, B_0, C0, B1);                                       \
01414               traits.madd(A1, B_0, C4, B_0);                                      \
01415               EIGEN_ASM_COMMENT("end step of gebp micro kernel 2pX1");            \
01416             } while(false)
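            // Here B1 is declared only to serve as the scratch operand of madd;
            // it never carries rhs data in this 2pX1 kernel.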
01417         
01418             EIGEN_GEBGP_ONESTEP(0);
01419             EIGEN_GEBGP_ONESTEP(1);
01420             EIGEN_GEBGP_ONESTEP(2);
01421             EIGEN_GEBGP_ONESTEP(3);
01422             EIGEN_GEBGP_ONESTEP(4);
01423             EIGEN_GEBGP_ONESTEP(5);
01424             EIGEN_GEBGP_ONESTEP(6);
01425             EIGEN_GEBGP_ONESTEP(7);
01426 
01427             blB += pk*RhsProgress;
01428             blA += pk*2*Traits::LhsProgress;
01429 
01430             EIGEN_ASM_COMMENT("end gebp micro kernel 2pX1");
01431           }
01432 
01433           // process the remainder of the peeled k loop
01434           for(Index k=peeled_kc; k<depth; k++)
01435           {
01436             RhsPacket B_0, B1;
01437             EIGEN_GEBGP_ONESTEP(0);
01438             blB += RhsProgress;
01439             blA += 2*Traits::LhsProgress;
01440           }
01441 #undef EIGEN_GEBGP_ONESTEP
01442           ResPacket R0, R1;
01443           ResPacket alphav = pset1<ResPacket>(alpha);
01444 
01445           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
01446           R1 = r0.loadPacket(1 * Traits::ResPacketSize);
01447           traits.acc(C0, alphav, R0);
01448           traits.acc(C4, alphav, R1);
01449           r0.storePacket(0 * Traits::ResPacketSize, R0);
01450           r0.storePacket(1 * Traits::ResPacketSize, R1);
01451           }
01452         }
01453       }
01454     }
01455     //---------- Process 1 * LhsProgress rows at once ----------
01456     if(mr>=1*Traits::LhsProgress)
01457     {
01458       // loop over each largest micro horizontal panel of the lhs (1*LhsProgress x depth)
01459       for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
01460       {
01461         // loop over each largest micro vertical panel of the rhs (depth x nr)
01462         for(Index j2=0; j2<packet_cols4; j2+=nr)
01463         {
01464           // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely
01465           // stored into 1 x nr registers.
01466           
01467           const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
01468           prefetch(&blA[0]);
01469 
01470           // get the res block as registers
01471           AccPacket C0, C1, C2, C3;
01472           traits.initAcc(C0);
01473           traits.initAcc(C1);
01474           traits.initAcc(C2);
01475           traits.initAcc(C3);
01476 
01477           LinearMapper r0 = res.getLinearMapper(i, j2 + 0);
01478           LinearMapper r1 = res.getLinearMapper(i, j2 + 1);
01479           LinearMapper r2 = res.getLinearMapper(i, j2 + 2);
01480           LinearMapper r3 = res.getLinearMapper(i, j2 + 3);
01481 
01482           r0.prefetch(prefetch_res_offset);
01483           r1.prefetch(prefetch_res_offset);
01484           r2.prefetch(prefetch_res_offset);
01485           r3.prefetch(prefetch_res_offset);
01486 
01487           // perform the "inner" products
01488           const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
01489           prefetch(&blB[0]);
01490           LhsPacket A0;
01491 
01492           for(Index k=0; k<peeled_kc; k+=pk)
01493           {
01494             EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX4");
01495             RhsPacket B_0, B1, B2, B3;
01496                
01497 #define EIGEN_GEBGP_ONESTEP(K) \
01498             do {                                                                \
01499               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX4");        \
01500               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
01501               traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0);                    \
01502               traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3);  \
01503               traits.madd(A0, B_0, C0, B_0);                                    \
01504               traits.madd(A0, B1,  C1, B1);                                     \
01505               traits.madd(A0, B2,  C2, B2);                                     \
01506               traits.madd(A0, B3,  C3, B3);                                     \
01507               EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX4");          \
01508             } while(false)
01509             
01510             internal::prefetch(blB+(48+0));
01511             EIGEN_GEBGP_ONESTEP(0);
01512             EIGEN_GEBGP_ONESTEP(1);
01513             EIGEN_GEBGP_ONESTEP(2);
01514             EIGEN_GEBGP_ONESTEP(3);
01515             internal::prefetch(blB+(48+16));
01516             EIGEN_GEBGP_ONESTEP(4);
01517             EIGEN_GEBGP_ONESTEP(5);
01518             EIGEN_GEBGP_ONESTEP(6);
01519             EIGEN_GEBGP_ONESTEP(7);
01520 
01521             blB += pk*4*RhsProgress;
01522             blA += pk*1*LhsProgress;
01523 
01524             EIGEN_ASM_COMMENT("end gebp micro kernel 1pX4");
01525           }
01526           // process the remainder of the peeled k loop
01527           for(Index k=peeled_kc; k<depth; k++)
01528           {
01529             RhsPacket B_0, B1, B2, B3;
01530             EIGEN_GEBGP_ONESTEP(0);
01531             blB += 4*RhsProgress;
01532             blA += 1*LhsProgress;
01533           }
01534 #undef EIGEN_GEBGP_ONESTEP
01535 
01536           ResPacket R0, R1;
01537           ResPacket alphav = pset1<ResPacket>(alpha);
01538 
01539           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
01540           R1 = r1.loadPacket(0 * Traits::ResPacketSize);
01541           traits.acc(C0, alphav, R0);
01542           traits.acc(C1,  alphav, R1);
01543           r0.storePacket(0 * Traits::ResPacketSize, R0);
01544           r1.storePacket(0 * Traits::ResPacketSize, R1);
01545 
01546           R0 = r2.loadPacket(0 * Traits::ResPacketSize);
01547           R1 = r3.loadPacket(0 * Traits::ResPacketSize);
01548           traits.acc(C2,  alphav, R0);
01549           traits.acc(C3,  alphav, R1);
01550           r2.storePacket(0 * Traits::ResPacketSize, R0);
01551           r3.storePacket(0 * Traits::ResPacketSize, R1);
01552         }
01553 
01554         // Deal with remaining columns of the rhs
01555         for(Index j2=packet_cols4; j2<cols; j2++)
01556         {
01557           // One column at a time
01558           const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
01559           prefetch(&blA[0]);
01560 
01561           // get the res block as registers
01562           AccPacket C0;
01563           traits.initAcc(C0);
01564 
01565           LinearMapper r0 = res.getLinearMapper(i, j2);
01566 
01567           // perform the "inner" products
01568           const RhsScalar* blB = &blockB[j2*strideB+offsetB];
01569           LhsPacket A0;
01570 
01571           for(Index k=0; k<peeled_kc; k+=pk)
01572           {
01573             EIGEN_ASM_COMMENT("begin gebp micro kernel 1pX1");
01574             RhsPacket B_0;
01575         
01576 #define EIGEN_GEBGP_ONESTEP(K) \
01577             do {                                                                \
01578               EIGEN_ASM_COMMENT("begin step of gebp micro kernel 1pX1");        \
01579               EIGEN_ASM_COMMENT("Note: these asm comments work around bug 935!"); \
01580               traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0);                    \
01581               traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);                     \
01582               traits.madd(A0, B_0, C0, B_0);                                    \
01583               EIGEN_ASM_COMMENT("end step of gebp micro kernel 1pX1");          \
01584             } while(false)
01585 
01586             EIGEN_GEBGP_ONESTEP(0);
01587             EIGEN_GEBGP_ONESTEP(1);
01588             EIGEN_GEBGP_ONESTEP(2);
01589             EIGEN_GEBGP_ONESTEP(3);
01590             EIGEN_GEBGP_ONESTEP(4);
01591             EIGEN_GEBGP_ONESTEP(5);
01592             EIGEN_GEBGP_ONESTEP(6);
01593             EIGEN_GEBGP_ONESTEP(7);
01594 
01595             blB += pk*RhsProgress;
01596             blA += pk*1*Traits::LhsProgress;
01597 
01598             EIGEN_ASM_COMMENT("end gebp micro kernel 1pX1");
01599           }
01600 
01601           // process the remainder of the peeled k loop
01602           for(Index k=peeled_kc; k<depth; k++)
01603           {
01604             RhsPacket B_0;
01605             EIGEN_GEBGP_ONESTEP(0);
01606             blB += RhsProgress;
01607             blA += 1*Traits::LhsProgress;
01608           }
01609 #undef EIGEN_GEBGP_ONESTEP
01610           ResPacket R0;
01611           ResPacket alphav = pset1<ResPacket>(alpha);
01612           R0 = r0.loadPacket(0 * Traits::ResPacketSize);
01613           traits.acc(C0, alphav, R0);
01614           r0.storePacket(0 * Traits::ResPacketSize, R0);
01615         }
01616       }
01617     }
01618     //---------- Process remaining rows, 1 at once ----------
01619     if(peeled_mc1<rows)
01620     {
01621       // loop over each panel of the rhs
01622       for(Index j2=0; j2<packet_cols4; j2+=nr)
01623       {
01624         // loop over each remaining row of the lhs (1 x depth)
01625         for(Index i=peeled_mc1; i<rows; i+=1)
01626         {
01627           const LhsScalar* blA = &blockA[i*strideA+offsetA];
01628           prefetch(&blA[0]);
01629           const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
01630 
01631           if( (SwappedTraits::LhsProgress % 4)==0 )
01632           {
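            // These leftover rows are handled with the traits "swapped": the packed
            // rhs is fed to the lhs side of the traits (straits), so vectorization
            // runs along the nr columns of res instead of along the rows.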
01633             // NOTE: the following piece of code won't work for 512-bit registers
01634             SAccPacket C0, C1, C2, C3;
01635             straits.initAcc(C0);
01636             straits.initAcc(C1);
01637             straits.initAcc(C2);
01638             straits.initAcc(C3);
01639 
01640             const Index spk   = (std::max)(1,SwappedTraits::LhsProgress/4);
01641             const Index endk  = (depth/spk)*spk;
01642             const Index endk4 = (depth/(spk*4))*(spk*4);
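            // Each loadRhsQuad below consumes spk coefficients of blA (the lhs
            // data, playing the rhs role in this swapped kernel); endk and endk4
            // round depth down to multiples of spk and 4*spk for the unrolled
            // loop and its tail.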
01643 
01644             Index k=0;
01645             for(; k<endk4; k+=4*spk)
01646             {
01647               SLhsPacket A0,A1;
01648               SRhsPacket B_0,B_1;
01649 
01650               straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0);
01651               straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A1);
01652 
01653               straits.loadRhsQuad(blA+0*spk, B_0);
01654               straits.loadRhsQuad(blA+1*spk, B_1);
01655               straits.madd(A0,B_0,C0,B_0);
01656               straits.madd(A1,B_1,C1,B_1);
01657 
01658               straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
01659               straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);
01660               straits.loadRhsQuad(blA+2*spk, B_0);
01661               straits.loadRhsQuad(blA+3*spk, B_1);
01662               straits.madd(A0,B_0,C2,B_0);
01663               straits.madd(A1,B_1,C3,B_1);
01664 
01665               blB += 4*SwappedTraits::LhsProgress;
01666               blA += 4*spk;
01667             }
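            // collapse the four independent partial sums of the unrolled loop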
01668             C0 = padd(padd(C0,C1),padd(C2,C3));
01669             for(; k<endk; k+=spk)
01670             {
01671               SLhsPacket A0;
01672               SRhsPacket B_0;
01673 
01674               straits.loadLhsUnaligned(blB, A0);
01675               straits.loadRhsQuad(blA, B_0);
01676               straits.madd(A0,B_0,C0,B_0);
01677 
01678               blB += SwappedTraits::LhsProgress;
01679               blA += spk;
01680             }
01681             if(SwappedTraits::LhsProgress==8)
01682             {
01683               // Special case where we have to first reduce the accumulation register C0
01684               typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
01685               typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
01686               typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SRhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
01687               typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;
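              // With a LhsProgress of 8 (e.g. 8 floats per AVX packet), the four
              // result coefficients res(i, j2..j2+3) fill only half a packet, so
              // C0 is first reduced to a half packet via predux4 before being
              // scaled by alpha and scattered back across the four columns.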
01688 
01689               SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);
01690               SResPacketHalf alphav = pset1<SResPacketHalf>(alpha);
01691 
01692               if(depth-endk>0)
01693               {
01694                 // We have to handle the last row of the rhs which corresponds to a half-packet
01695                 SLhsPacketHalf a0;
01696                 SRhsPacketHalf b0;
01697                 straits.loadLhsUnaligned(blB, a0);
01698                 straits.loadRhs(blA, b0);
01699                 SAccPacketHalf c0 = predux4(C0);
01700                 straits.madd(a0,b0,c0,b0);
01701                 straits.acc(c0, alphav, R);
01702               }
01703               else
01704               {
01705                 straits.acc(predux4(C0), alphav, R);
01706               }
01707               res.scatterPacket(i, j2, R);
01708             }
01709             else
01710             {
01711               SResPacket R = res.template gatherPacket<SResPacket>(i, j2);
01712               SResPacket alphav = pset1<SResPacket>(alpha);
01713               straits.acc(C0, alphav, R);
01714               res.scatterPacket(i, j2, R);
01715             }
01716           }
01717           else // scalar path
01718           {
01719             // get a 1 x 4 res block as registers
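            // CJMADD(cj, a, b, c, t) accumulates c += cj(a) * b, applying the
            // requested conjugation; t is a scratch value on some compilers.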
01720             ResScalar C0(0), C1(0), C2(0), C3(0);
01721 
01722             for(Index k=0; k<depth; k++)
01723             {
01724               LhsScalar A0;
01725               RhsScalar B_0, B_1;
01726 
01727               A0 = blA[k];
01728 
01729               B_0 = blB[0];
01730               B_1 = blB[1];
01731               CJMADD(cj,A0,B_0,C0,  B_0);
01732               CJMADD(cj,A0,B_1,C1,  B_1);
01733               
01734               B_0 = blB[2];
01735               B_1 = blB[3];
01736               CJMADD(cj,A0,B_0,C2,  B_0);
01737               CJMADD(cj,A0,B_1,C3,  B_1);
01738               
01739               blB += 4;
01740             }
01741             res(i, j2 + 0) += alpha * C0;
01742             res(i, j2 + 1) += alpha * C1;
01743             res(i, j2 + 2) += alpha * C2;
01744             res(i, j2 + 3) += alpha * C3;
01745           }
01746         }
01747       }
01748       // remaining columns
01749       for(Index j2=packet_cols4; j2<cols; j2++)
01750       {
01751         // loop over each remaining row of the lhs (1 x depth)
01752         for(Index i=peeled_mc1; i<rows; i+=1)
01753         {
01754           const LhsScalar* blA = &blockA[i*strideA+offsetA];
01755           prefetch(&blA[0]);
01756           // get a 1 x 1 res block as registers
01757           ResScalar C0(0);
01758           const RhsScalar* blB = &blockB[j2*strideB+offsetB];
01759           for(Index k=0; k<depth; k++)
01760           {
01761             LhsScalar A0 = blA[k];
01762             RhsScalar B_0 = blB[k];
01763             CJMADD(cj, A0, B_0, C0, B_0);
01764           }
01765           res(i, j2) += alpha * C0;
01766         }
01767       }
01768     }
01769   }
01770 
01771 
01772 #undef CJMADD
01773 
01774 // pack a block of the lhs
01775 // The traversal order is as follows (mr==4):
01776 //   0  4  8 12 ...
01777 //   1  5  9 13 ...
01778 //   2  6 10 14 ...
01779 //   3  7 11 15 ...
01780 //
01781 //  16 20 24 28 ...
01782 //  17 21 25 29 ...
01783 //  18 22 26 30 ...
01784 //  19 23 27 31 ...
01785 //
01786 //  32 33 34 35 ...
01787 //  36 37 38 39 ...
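// i.e. the lhs is cut into panels of Pack1 rows (then Pack2, then single rows
// for the remainder); within each panel the coefficients are stored column by
// column, so the kernel can stream through a panel with sequential loads.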
01788 template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
01789 struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
01790 {
01791   typedef typename DataMapper::LinearMapper LinearMapper;
01792   EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
01793 };
01794 
01795 template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
01796 EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
01797   ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
01798 {
01799   typedef typename packet_traits<Scalar>::type Packet;
01800   enum { PacketSize = packet_traits<Scalar>::size };
01801 
01802   EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
01803   EIGEN_UNUSED_VARIABLE(stride);
01804   EIGEN_UNUSED_VARIABLE(offset);
01805   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
01806   eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
01807   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
01808   Index count = 0;
01809 
01810   const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
01811   const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
01812   const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
01813   const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1
01814                          : Pack2>1             ? (rows/Pack2)*Pack2 : 0;
01815 
01816   Index i=0;
01817 
01818   // Pack 3 packets
01819   if(Pack1>=3*PacketSize)
01820   {
01821     for(; i<peeled_mc3; i+=3*PacketSize)
01822     {
01823       if(PanelMode) count += (3*PacketSize) * offset;
01824 
01825       for(Index k=0; k<depth; k++)
01826       {
01827         Packet A, B, C;
01828         A = lhs.loadPacket(i+0*PacketSize, k);
01829         B = lhs.loadPacket(i+1*PacketSize, k);
01830         C = lhs.loadPacket(i+2*PacketSize, k);
01831         pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
01832         pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
01833         pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
01834       }
01835       if(PanelMode) count += (3*PacketSize) * (stride-offset-depth);
01836     }
01837   }
01838   // Pack 2 packets
01839   if(Pack1>=2*PacketSize)
01840   {
01841     for(; i<peeled_mc2; i+=2*PacketSize)
01842     {
01843       if(PanelMode) count += (2*PacketSize) * offset;
01844 
01845       for(Index k=0; k<depth; k++)
01846       {
01847         Packet A, B;
01848         A = lhs.loadPacket(i+0*PacketSize, k);
01849         B = lhs.loadPacket(i+1*PacketSize, k);
01850         pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
01851         pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
01852       }
01853       if(PanelMode) count += (2*PacketSize) * (stride-offset-depth);
01854     }
01855   }
01856   // Pack 1 packets
01857   if(Pack1>=1*PacketSize)
01858   {
01859     for(; i<peeled_mc1; i+=1*PacketSize)
01860     {
01861       if(PanelMode) count += (1*PacketSize) * offset;
01862 
01863       for(Index k=0; k<depth; k++)
01864       {
01865         Packet A;
01866         A = lhs.loadPacket(i+0*PacketSize, k);
01867         pstore(blockA+count, cj.pconj(A));
01868         count+=PacketSize;
01869       }
01870       if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
01871     }
01872   }
01873   // Pack scalars
01874   if(Pack2<PacketSize && Pack2>1)
01875   {
01876     for(; i<peeled_mc0; i+=Pack2)
01877     {
01878       if(PanelMode) count += Pack2 * offset;
01879 
01880       for(Index k=0; k<depth; k++)
01881         for(Index w=0; w<Pack2; w++)
01882           blockA[count++] = cj(lhs(i+w, k));
01883 
01884       if(PanelMode) count += Pack2 * (stride-offset-depth);
01885     }
01886   }
01887   for(; i<rows; i++)
01888   {
01889     if(PanelMode) count += offset;
01890     for(Index k=0; k<depth; k++)
01891       blockA[count++] = cj(lhs(i, k));
01892     if(PanelMode) count += (stride-offset-depth);
01893   }
01894 }
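// For reference, ignoring the PanelMode padding, the packing above is
// equivalent to the following scalar loop (a sketch for exposition only,
// where P is the row width of the current panel: 3*PacketSize, 2*PacketSize,
// PacketSize, Pack2, then 1):
//
//   Index count = 0;
//   for(Index i=0; i<rows; i+=P)
//     for(Index k=0; k<depth; k++)
//       for(Index w=0; w<P; w++)
//         blockA[count++] = cj(lhs(i+w, k));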
01895 
01896 template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
01897 struct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
01898 {
01899   typedef typename DataMapper::LinearMapper LinearMapper;
01900   EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);
01901 };
01902 
01903 template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
01904 EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
01905   ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)
01906 {
01907   typedef typename packet_traits<Scalar>::type Packet;
01908   enum { PacketSize = packet_traits<Scalar>::size };
01909 
01910   EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
01911   EIGEN_UNUSED_VARIABLE(stride);
01912   EIGEN_UNUSED_VARIABLE(offset);
01913   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
01914   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
01915   Index count = 0;
01916 
01917 //   const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
01918 //   const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
01919 //   const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
01920 
01921   int pack = Pack1;
01922   Index i = 0;
01923   while(pack>0)
01924   {
01925     Index remaining_rows = rows-i;
01926     Index peeled_mc = i+(remaining_rows/pack)*pack;
01927     for(; i<peeled_mc; i+=pack)
01928     {
01929       if(PanelMode) count += pack * offset;
01930 
01931       const Index peeled_k = (depth/PacketSize)*PacketSize;
01932       Index k=0;
01933       if(pack>=PacketSize)
01934       {
01935         for(; k<peeled_k; k+=PacketSize)
01936         {
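          // The lhs is row major here, so each packet loaded below runs along k.
          // A PacketSize x PacketSize tile is loaded, transposed in registers,
          // and stored so that the packed block ends up in the same column-major
          // panel order as the ColMajor path above.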
01937           for (Index m = 0; m < pack; m += PacketSize)
01938           {
01939             PacketBlock<Packet> kernel;
01940             for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);
01941             ptranspose(kernel);
01942             for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));
01943           }
01944           count += PacketSize*pack;
01945         }
01946       }
01947       for(; k<depth; k++)
01948       {
01949         Index w=0;
01950         for(; w<pack-3; w+=4)
01951         {
01952           Scalar a(cj(lhs(i+w+0, k))),
01953                  b(cj(lhs(i+w+1, k))),
01954                  c(cj(lhs(i+w+2, k))),
01955                  d(cj(lhs(i+w+3, k)));
01956           blockA[count++] = a;
01957           blockA[count++] = b;
01958           blockA[count++] = c;
01959           blockA[count++] = d;
01960         }
01961         if(pack%4)
01962           for(;w<pack;++w)
01963             blockA[count++] = cj(lhs(i+w, k));
01964       }
01965 
01966       if(PanelMode) count += pack * (stride-offset-depth);
01967     }
01968 
01969     pack -= PacketSize;
01970     if(pack<Pack2 && (pack+PacketSize)!=Pack2)
01971       pack = Pack2;
01972   }
01973 
01974   for(; i<rows; i++)
01975   {
01976     if(PanelMode) count += offset;
01977     for(Index k=0; k<depth; k++)
01978       blockA[count++] = cj(lhs(i, k));
01979     if(PanelMode) count += (stride-offset-depth);
01980   }
01981 }
01982 
01983 // copy a complete panel of the rhs
01984 // this version is optimized for column major matrices
01985 // The traversal order is as follows (nr==4):
01986 //  0  1  2  3   12 13 14 15   24 27
01987 //  4  5  6  7   16 17 18 19   25 28
01988 //  8  9 10 11   20 21 22 23   26 29
01989 //  .  .  .  .    .  .  .  .    .  .
01990 template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
01991 struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
01992 {
01993   typedef typename packet_traits<Scalar>::type Packet;
01994   typedef typename DataMapper::LinearMapper LinearMapper;
01995   enum { PacketSize = packet_traits<Scalar>::size };
01996   EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
01997 };
01998 
01999 template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
02000 EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>
02001   ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
02002 {
02003   EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
02004   EIGEN_UNUSED_VARIABLE(stride);
02005   EIGEN_UNUSED_VARIABLE(offset);
02006   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
02007   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
02008   Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
02009   Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
02010   Index count = 0;
02011   const Index peeled_k = (depth/PacketSize)*PacketSize;
02012 //   if(nr>=8)
02013 //   {
02014 //     for(Index j2=0; j2<packet_cols8; j2+=8)
02015 //     {
02016 //       // skip what we have before
02017 //       if(PanelMode) count += 8 * offset;
02018 //       const Scalar* b0 = &rhs[(j2+0)*rhsStride];
02019 //       const Scalar* b1 = &rhs[(j2+1)*rhsStride];
02020 //       const Scalar* b2 = &rhs[(j2+2)*rhsStride];
02021 //       const Scalar* b3 = &rhs[(j2+3)*rhsStride];
02022 //       const Scalar* b4 = &rhs[(j2+4)*rhsStride];
02023 //       const Scalar* b5 = &rhs[(j2+5)*rhsStride];
02024 //       const Scalar* b6 = &rhs[(j2+6)*rhsStride];
02025 //       const Scalar* b7 = &rhs[(j2+7)*rhsStride];
02026 //       Index k=0;
02027 //       if(PacketSize==8) // TODO enable vectorized transposition for PacketSize==4
02028 //       {
02029 //         for(; k<peeled_k; k+=PacketSize) {
02030 //           PacketBlock<Packet> kernel;
02031 //           for (int p = 0; p < PacketSize; ++p) {
02032 //             kernel.packet[p] = ploadu<Packet>(&rhs[(j2+p)*rhsStride+k]);
02033 //           }
02034 //           ptranspose(kernel);
02035 //           for (int p = 0; p < PacketSize; ++p) {
02036 //             pstoreu(blockB+count, cj.pconj(kernel.packet[p]));
02037 //             count+=PacketSize;
02038 //           }
02039 //         }
02040 //       }
02041 //       for(; k<depth; k++)
02042 //       {
02043 //         blockB[count+0] = cj(b0[k]);
02044 //         blockB[count+1] = cj(b1[k]);
02045 //         blockB[count+2] = cj(b2[k]);
02046 //         blockB[count+3] = cj(b3[k]);
02047 //         blockB[count+4] = cj(b4[k]);
02048 //         blockB[count+5] = cj(b5[k]);
02049 //         blockB[count+6] = cj(b6[k]);
02050 //         blockB[count+7] = cj(b7[k]);
02051 //         count += 8;
02052 //       }
02053 //       // skip what we have after
02054 //       if(PanelMode) count += 8 * (stride-offset-depth);
02055 //     }
02056 //   }
02057 
02058   if(nr>=4)
02059   {
02060     for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
02061     {
02062       // skip what we have before
02063       if(PanelMode) count += 4 * offset;
02064       const LinearMapper dm0 = rhs.getLinearMapper(0, j2 + 0);
02065       const LinearMapper dm1 = rhs.getLinearMapper(0, j2 + 1);
02066       const LinearMapper dm2 = rhs.getLinearMapper(0, j2 + 2);
02067       const LinearMapper dm3 = rhs.getLinearMapper(0, j2 + 3);
02068 
02069       Index k=0;
02070       if((PacketSize%4)==0) // TODO enable vectorized transposition for PacketSize==2 ??
02071       {
02072         for(; k<peeled_k; k+=PacketSize) {
02073           PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;
02074           kernel.packet[0] = dm0.loadPacket(k);
02075           kernel.packet[1%PacketSize] = dm1.loadPacket(k);
02076           kernel.packet[2%PacketSize] = dm2.loadPacket(k);
02077           kernel.packet[3%PacketSize] = dm3.loadPacket(k);
02078           ptranspose(kernel);
02079           pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));
02080           pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));
02081           pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize]));
02082           pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize]));
02083           count+=4*PacketSize;
02084         }
02085       }
02086       for(; k<depth; k++)
02087       {
02088         blockB[count+0] = cj(dm0(k));
02089         blockB[count+1] = cj(dm1(k));
02090         blockB[count+2] = cj(dm2(k));
02091         blockB[count+3] = cj(dm3(k));
02092         count += 4;
02093       }
02094       // skip what we have after
02095       if(PanelMode) count += 4 * (stride-offset-depth);
02096     }
02097   }
02098 
02099   // copy the remaining columns one at a time (nr==1)
02100   for(Index j2=packet_cols4; j2<cols; ++j2)
02101   {
02102     if(PanelMode) count += offset;
02103     const LinearMapper dm0 = rhs.getLinearMapper(0, j2);
02104     for(Index k=0; k<depth; k++)
02105     {
02106       blockB[count] = cj(dm0(k));
02107       count += 1;
02108     }
02109     if(PanelMode) count += (stride-offset-depth);
02110   }
02111 }
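// The packed blockB thus interleaves the nr columns of a panel: for every row
// index k, the 4 coefficients rhs(k, j2..j2+3) are stored contiguously, which
// is exactly the order in which broadcastRhs consumes them in the gebp kernel
// above.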
02112 
02113 // this version is optimized for row major matrices
02114 template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
02115 struct gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
02116 {
02117   typedef typename packet_traits<Scalar>::type Packet;
02118   typedef typename DataMapper::LinearMapper LinearMapper;
02119   enum { PacketSize = packet_traits<Scalar>::size };
02120   EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);
02121 };
02122 
02123 template<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>
02124 EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>
02125   ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)
02126 {
02127   EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
02128   EIGEN_UNUSED_VARIABLE(stride);
02129   EIGEN_UNUSED_VARIABLE(offset);
02130   eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
02131   conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
02132   Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
02133   Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
02134   Index count = 0;
02135 
02136 //   if(nr>=8)
02137 //   {
02138 //     for(Index j2=0; j2<packet_cols8; j2+=8)
02139 //     {
02140 //       // skip what we have before
02141 //       if(PanelMode) count += 8 * offset;
02142 //       for(Index k=0; k<depth; k++)
02143 //       {
02144 //         if (PacketSize==8) {
02145 //           Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
02146 //           pstoreu(blockB+count, cj.pconj(A));
02147 //         } else if (PacketSize==4) {
02148 //           Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
02149 //           Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]);
02150 //           pstoreu(blockB+count, cj.pconj(A));
02151 //           pstoreu(blockB+count+PacketSize, cj.pconj(B));
02152 //         } else {
02153 //           const Scalar* b0 = &rhs[k*rhsStride + j2];
02154 //           blockB[count+0] = cj(b0[0]);
02155 //           blockB[count+1] = cj(b0[1]);
02156 //           blockB[count+2] = cj(b0[2]);
02157 //           blockB[count+3] = cj(b0[3]);
02158 //           blockB[count+4] = cj(b0[4]);
02159 //           blockB[count+5] = cj(b0[5]);
02160 //           blockB[count+6] = cj(b0[6]);
02161 //           blockB[count+7] = cj(b0[7]);
02162 //         }
02163 //         count += 8;
02164 //       }
02165 //       // skip what we have after
02166 //       if(PanelMode) count += 8 * (stride-offset-depth);
02167 //     }
02168 //   }
02169   if(nr>=4)
02170   {
02171     for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
02172     {
02173       // skip what we have before
02174       if(PanelMode) count += 4 * offset;
02175       for(Index k=0; k<depth; k++)
02176       {
02177         if (PacketSize==4) {
02178           Packet A = rhs.loadPacket(k, j2);
02179           pstoreu(blockB+count, cj.pconj(A));
02180           count += PacketSize;
02181         } else {
02182           const LinearMapper dm0 = rhs.getLinearMapper(k, j2);
02183           blockB[count+0] = cj(dm0(0));
02184           blockB[count+1] = cj(dm0(1));
02185           blockB[count+2] = cj(dm0(2));
02186           blockB[count+3] = cj(dm0(3));
02187           count += 4;
02188         }
02189       }
02190       // skip what we have after
02191       if(PanelMode) count += 4 * (stride-offset-depth);
02192     }
02193   }
02194   // copy the remaining columns one at a time (nr==1)
02195   for(Index j2=packet_cols4; j2<cols; ++j2)
02196   {
02197     if(PanelMode) count += offset;
02198     for(Index k=0; k<depth; k++)
02199     {
02200       blockB[count] = cj(rhs(k, j2));
02201       count += 1;
02202     }
02203     if(PanelMode) count += stride-offset-depth;
02204   }
02205 }
02206 
02207 } // end namespace internal
02208 
02211 inline std::ptrdiff_t l1CacheSize()
02212 {
02213   std::ptrdiff_t l1, l2, l3;
02214   internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
02215   return l1;
02216 }
02217 
02220 inline std::ptrdiff_t l2CacheSize()
02221 {
02222   std::ptrdiff_t l1, l2, l3;
02223   internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
02224   return l2;
02225 }
02226 
02232 inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)
02233 {
02234   internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);
02235 }
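// Example (a sketch): the runtime cache parameters can be overridden before
// running large products, e.g. when the target machine is misdetected:
//
//   Eigen::setCpuCacheSizes(32*1024, 256*1024, 8*1024*1024);
//   std::ptrdiff_t l1 = Eigen::l1CacheSize();  // returns 32768 from now on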
02236 
02237 } // end namespace Eigen
02238 
02239 #endif // EIGEN_GENERAL_BLOCK_PANEL_H