// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <[email protected]>
// Copyright (C) 2010 Konstantinos Margaritis <[email protected]>
// Heavily based on Gael's SSE version.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_NEON_H
#define EIGEN_PACKET_MATH_NEON_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#endif

// FIXME NEON has 16 quad registers, but since the current register allocator
// is so bad, it is much better to reduce it to 8
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif

typedef float32x2_t Packet2f;
typedef float32x4_t Packet4f;
typedef int32x4_t   Packet4i;
typedef int32x2_t   Packet2i;
typedef uint32x4_t  Packet4ui;

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)

#if EIGEN_COMP_LLVM && !EIGEN_COMP_CLANG
  //Special treatment for Apple's llvm-gcc, its NEON packet types are unions
  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {{X, Y}}
  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {{X, Y, Z, W}}
#else
  //Default initializer for packets
  #define EIGEN_INIT_NEON_PACKET2(X, Y)       {X, Y}
  #define EIGEN_INIT_NEON_PACKET4(X, Y, Z, W) {X, Y, Z, W}
#endif
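// Usage sketch for the constant-declaration macros above (illustrative,
// not part of the original file): inside a NEON kernel one would write, e.g.,
//   _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
// which expands to
//   const Packet4f p4f_half = pset1<Packet4f>(0.5f);
// i.e. a packet with 0.5f broadcast into all four lanes.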
// arm64 does have the pld instruction. If available, let's trust the
// __builtin_prefetch built-in function, which is available on LLVM and GCC
// (at least).
#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
  #define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
  #define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
#elif !EIGEN_ARCH_ARM64
  #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ( "   pld [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
#else
  // by default no explicit prefetching
  #define EIGEN_ARM_PREFETCH(ADDR)
#endif

template<> struct packet_traits<float>  : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet4f half; // Packet2f intrinsics not implemented yet
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0, // Packet2f intrinsics not implemented yet

    HasDiv = 1,
    // FIXME check the Has*
    HasSin  = 0,
    HasCos  = 0,
    HasLog  = 0,
    HasExp  = 1,
    HasSqrt = 0
  };
};
template<> struct packet_traits<int>    : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet4i half; // Packet2i intrinsics not implemented yet
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0 // Packet2i intrinsics not implemented yet
    // FIXME check the Has*
  };
};

#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM
// workaround for a gcc 4.2, 4.3 and 4.4 compilation issue
EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32 (const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }
EIGEN_STRONG_INLINE void        vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
EIGEN_STRONG_INLINE void        vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
#endif

template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };
template<> struct unpacket_traits<Packet4i> { typedef int   type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };

template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from)   { return vdupq_n_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
{
  Packet4f countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
  return vaddq_f32(pset1<Packet4f>(a), countdown);
}
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)
{
  Packet4i countdown = EIGEN_INIT_NEON_PACKET4(0, 1, 2, 3);
  return vaddq_s32(pset1<Packet4i>(a), countdown);
}

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }
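// Illustrative sketch (an assumption, not part of the original file): how a
// caller would combine the primitives above to sum two float arrays four
// lanes at a time. The function name and loop structure are hypothetical.
inline void example_packet_sum(const float* a, const float* b, float* out, int n)
{
  int i = 0;
  for (; i + 4 <= n; i += 4) {
    Packet4f pa = vld1q_f32(a + i);             // four lanes of a
    Packet4f pb = vld1q_f32(b + i);             // four lanes of b
    vst1q_f32(out + i, padd<Packet4f>(pa, pb)); // lane-wise add, store back
  }
  for (; i < n; ++i) out[i] = a[i] + b[i];      // scalar tail
}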
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
#if EIGEN_ARCH_ARM64
  return vdivq_f32(a,b);
#else
  Packet4f inv, restep, div;

  // NEON does not offer a divide instruction; we have to do a reciprocal approximation.
  // However, in contrast to other SIMD engines (AltiVec/SSE), NEON offers
  // a reciprocal estimate AND a reciprocal step, which saves a few instructions.
  // vrecpeq_f32() returns an estimate of 1/b, which we will fine-tune with
  // Newton-Raphson and vrecpsq_f32().
  inv = vrecpeq_f32(b);

  // This returns a differential by which we have to multiply inv to get a better
  // approximation of 1/b.
  restep = vrecpsq_f32(b, inv);
  inv = vmulq_f32(restep, inv);

  // Finally, multiply a by 1/b to get the wanted result of the division.
  div = vmulq_f32(a, inv);

  return div;
#endif
}

template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by NEON");
  return pset1<Packet4i>(0);
}

#ifdef __ARM_FEATURE_FMA
// See bug 936.
// FMA is available on VFPv4, i.e. when compiling with -mfpu=neon-vfpv4.
// FMA is a true fused multiply-add, i.e. only 1 rounding at the end, no intermediate rounding.
// MLA is not fused, i.e. it does 2 roundings.
// In addition to giving better accuracy, FMA also gives better performance here on a Krait (Nexus 4):
// MLA: 10 GFlop/s ; FMA: 12 GFlop/s.
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vmlaq_f32(c,a,b); }
#endif

// No FMA instruction for int, so use MLA unconditionally.
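// Worked note on the Newton-Raphson refinement in pdiv above (illustrative,
// not part of the original file): vrecpsq_f32(b, x) computes (2 - b*x), so
// x' = x * (2 - b*x) is one Newton step for f(x) = 1/x - b, which roughly
// doubles the number of accurate bits of the estimate. A second refinement
// step, if more precision were wanted, would look like:
//   inv = vmulq_f32(vrecpsq_f32(b, inv), inv);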
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }

// Logical operations are not supported for float, so we have to reinterpret-cast
// to/from unsigned integers using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
}
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from)   { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)   { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  float32x2_t lo, hi;
  lo = vld1_dup_f32(from);
  hi = vld1_dup_f32(from+1);
  return vcombine_f32(lo, hi);
}
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  int32x2_t lo, hi;
  lo = vld1_dup_s32(from);
  hi = vld1_dup_s32(from+1);
  return vcombine_s32(lo, hi);
}

template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet4i& from)     { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }
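// Lane semantics of ploaddup above (illustrative, not part of the original
// file): it reads two scalars and duplicates each into adjacent lanes, e.g.
//   float src[2] = {1.f, 2.f};
//   Packet4f p = ploaddup<Packet4f>(src);  // lanes: {1, 1, 2, 2}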
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)     { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }

template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  Packet4f res = pset1<Packet4f>(0.f);
  res = vsetq_lane_f32(from[0*stride], res, 0);
  res = vsetq_lane_f32(from[1*stride], res, 1);
  res = vsetq_lane_f32(from[2*stride], res, 2);
  res = vsetq_lane_f32(from[3*stride], res, 3);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
  Packet4i res = pset1<Packet4i>(0);
  res = vsetq_lane_s32(from[0*stride], res, 0);
  res = vsetq_lane_s32(from[1*stride], res, 1);
  res = vsetq_lane_s32(from[2*stride], res, 2);
  res = vsetq_lane_s32(from[3*stride], res, 3);
  return res;
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = vgetq_lane_f32(from, 0);
  to[stride*1] = vgetq_lane_f32(from, 1);
  to[stride*2] = vgetq_lane_f32(from, 2);
  to[stride*3] = vgetq_lane_f32(from, 3);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
  to[stride*0] = vgetq_lane_s32(from, 0);
  to[stride*1] = vgetq_lane_s32(from, 1);
  to[stride*2] = vgetq_lane_s32(from, 2);
  to[stride*3] = vgetq_lane_s32(from, 3);
}

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { EIGEN_ARM_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr)     { EIGEN_ARM_PREFETCH(addr); }
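// Usage sketch for the strided accessors above (illustrative; the function
// name and layout are assumptions, not part of the original file): pgather
// can read one column of a row-major matrix by using the row length as the
// stride, e.g. rows 0..3 of column `col` in a 4 x n_cols float matrix:
inline Packet4f example_load_column(const float* mat, Index n_cols, Index col)
{
  return pgather<float, Packet4f>(mat + col, n_cols);  // lanes = rows 0..3
}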
// FIXME only store the 2 first elements ?
template<> EIGEN_STRONG_INLINE float pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }
template<> EIGEN_STRONG_INLINE int   pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {
  float32x2_t a_lo, a_hi;
  Packet4f a_r64;

  a_r64 = vrev64q_f32(a);
  a_lo  = vget_low_f32(a_r64);
  a_hi  = vget_high_f32(a_r64);
  return vcombine_f32(a_hi, a_lo);
}
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {
  int32x2_t a_lo, a_hi;
  Packet4i a_r64;

  a_r64 = vrev64q_s32(a);
  a_lo  = vget_low_s32(a_r64);
  a_hi  = vget_high_s32(a_r64);
  return vcombine_s32(a_hi, a_lo);
}

template<size_t offset>
struct protate_impl<offset, Packet4f>
{
  static Packet4f run(const Packet4f& a) {
    return vextq_f32(a, a, offset);
  }
};

template<size_t offset>
struct protate_impl<offset, Packet4i>
{
  static Packet4i run(const Packet4i& a) {
    return vextq_s32(a, a, offset);
  }
};

template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  sum  = vpadd_f32(a_lo, a_hi);
  sum  = vpadd_f32(sum, sum);
  return vget_lane_f32(sum, 0);
}

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  float32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4f sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two interleaves in a row to acquire the transposed vector.
  vtrn1 = vzipq_f32(vecs[0], vecs[2]);
  vtrn2 = vzipq_f32(vecs[1], vecs[3]);
  res1  = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
  res2  = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);

  // Do the addition of the resulting vectors.
  sum1 = vaddq_f32(res1.val[0], res1.val[1]);
  sum2 = vaddq_f32(res2.val[0], res2.val[1]);
  sum  = vaddq_f32(sum1, sum2);

  return sum;
}

template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, sum;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  sum  = vpadd_s32(a_lo, a_hi);
  sum  = vpadd_s32(sum, sum);
  return vget_lane_s32(sum, 0);
}
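// Lane trace of the pairwise reductions above (illustrative, not part of the
// original file), for a = {a0, a1, a2, a3}:
//   vpadd(lo, hi)   -> {a0+a1, a2+a3}
//   vpadd(sum, sum) -> {a0+a1+a2+a3, a0+a1+a2+a3}
// so lane 0 holds the full horizontal sum.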
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  int32x4x2_t vtrn1, vtrn2, res1, res2;
  Packet4i sum1, sum2, sum;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two interleaves in a row to acquire the transposed vector.
  vtrn1 = vzipq_s32(vecs[0], vecs[2]);
  vtrn2 = vzipq_s32(vecs[1], vecs[3]);
  res1  = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
  res2  = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);

  // Do the addition of the resulting vectors.
  sum1 = vaddq_s32(res1.val[0], res1.val[1]);
  sum2 = vaddq_s32(res2.val[0], res2.val[1]);
  sum  = vaddq_s32(sum1, sum2);

  return sum;
}

// Other reduction functions:
// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, prod;

  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
  prod = vmul_f32(a_lo, a_hi);
  // Multiply prod with its swapped value |a2*a4|a1*a3|
  prod = vmul_f32(prod, vrev64_f32(prod));

  return vget_lane_f32(prod, 0);
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, prod;

  // Get a_lo = |a1|a2| and a_hi = |a3|a4|
  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
  prod = vmul_s32(a_lo, a_hi);
  // Multiply prod with its swapped value |a2*a4|a1*a3|
  prod = vmul_s32(prod, vrev64_s32(prod));

  return vget_lane_s32(prod, 0);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, min;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  min  = vpmin_f32(a_lo, a_hi);
  min  = vpmin_f32(min, min);

  return vget_lane_f32(min, 0);
}

template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, min;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  min  = vpmin_s32(a_lo, a_hi);
  min  = vpmin_s32(min, min);

  return vget_lane_s32(min, 0);
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  float32x2_t a_lo, a_hi, max;

  a_lo = vget_low_f32(a);
  a_hi = vget_high_f32(a);
  max  = vpmax_f32(a_lo, a_hi);
  max  = vpmax_f32(max, max);

  return vget_lane_f32(max, 0);
}

template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
  int32x2_t a_lo, a_hi, max;

  a_lo = vget_low_s32(a);
  a_hi = vget_high_s32(a);
  max  = vpmax_s32(a_lo, a_hi);
  max  = vpmax_s32(max, max);

  return vget_lane_s32(max, 0);
}

// This PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing
// spurious compilation errors; see bug 347 and this LLVM bug:
// http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
    {\
        if (Offset!=0)\
            first = Command(first, second, Offset);\
    }\
};\

PALIGN_NEON(0,Packet4f,vextq_f32)
PALIGN_NEON(1,Packet4f,vextq_f32)
PALIGN_NEON(2,Packet4f,vextq_f32)
PALIGN_NEON(3,Packet4f,vextq_f32)
PALIGN_NEON(0,Packet4i,vextq_s32)
PALIGN_NEON(1,Packet4i,vextq_s32)
PALIGN_NEON(2,Packet4i,vextq_s32)
PALIGN_NEON(3,Packet4i,vextq_s32)

#undef PALIGN_NEON
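// Lane semantics of palign_impl above (illustrative, not part of the original
// file): vext concatenates the two packets and extracts a four-lane window,
// e.g. with first = {f0,f1,f2,f3}, second = {s0,s1,s2,s3} and Offset = 1:
//   palign_impl<1,Packet4f>::run(first, second);  // first = {f1, f2, f3, s0}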
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
  float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]),  vget_low_f32(tmp2.val[0]));
  kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));
  kernel.packet[2] = vcombine_f32(vget_low_f32(tmp1.val[1]),  vget_low_f32(tmp2.val[1]));
  kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
  int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);
  kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]),  vget_low_s32(tmp2.val[0]));
  kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));
  kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]),  vget_low_s32(tmp2.val[1]));
  kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1]));
}
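// Lane trace of the 4x4 ptranspose above (illustrative, not part of the
// original file): starting from the row packets
//   {a0,a1,a2,a3}, {b0,b1,b2,b3}, {c0,c1,c2,c3}, {d0,d1,d2,d3}
// the zip/combine sequence leaves the column packets
//   {a0,b0,c0,d0}, {a1,b1,c1,d1}, {a2,b2,c2,d2}, {a3,b3,c3,d3}.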
//---------- double ----------

// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrinsics for double.
// Confirmed at least with __apple_build_version__ = 6000054.
#ifdef __apple_build_version__
// Let's hope that by the time __apple_build_version__ hits the 601* range, the bug will be fixed.
// https://gist.github.com/yamaya/2924292 suggests that the first 3 digits are only updated with
// major toolchain updates.
#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000)
#else
#define EIGEN_APPLE_DOUBLE_NEON_BUG 0
#endif

#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG

#if (EIGEN_COMP_GNUC_STRICT && defined(__ANDROID__)) || defined(__apple_build_version__)
// Bug 907: workaround missing declarations of the following two functions in the ADK
__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
vreinterpretq_u64_f64 (float64x2_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vreinterpretq_f64_u64 (uint64x2_t __a)
{
  return (float64x2_t) __a;
}
#endif

typedef float64x2_t Packet2d;
typedef float64x1_t Packet1d;

template<> struct packet_traits<double>  : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 2,
    HasHalfPacket = 0,

    HasDiv = 1,
    // FIXME check the Has*
    HasSin  = 0,
    HasCos  = 0,
    HasLog  = 0,
    HasExp  = 0,
    HasSqrt = 0
  };
};

template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };

template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return vdupq_n_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a)
{
  Packet2d countdown = EIGEN_INIT_NEON_PACKET2(0, 1);
  return vaddq_f64(pset1<Packet2d>(a), countdown);
}
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); }

template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmulq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return vdivq_f64(a,b); }
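// Note (added commentary, not part of the original file): unlike the 32-bit
// float path earlier, pdiv<Packet2d> needs no reciprocal refinement; AArch64
// provides a true vector divide (vdivq_f64), so a single instruction suffices.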
#ifdef __ARM_FEATURE_FMA
// See bug 936. See above comment about FMA for float.
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); }
#endif

template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); }

// Logical operations are not supported for double, so we have to reinterpret-cast
// to/from unsigned integers using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b)
{
  return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
}

template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{
  return vld1q_dup_f64(from);
}
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to, from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }

template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  Packet2d res = pset1<Packet2d>(0.0);
  res = vsetq_lane_f64(from[0*stride], res, 0);
  res = vsetq_lane_f64(from[1*stride], res, 1);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = vgetq_lane_f64(from, 0);
  to[stride*1] = vgetq_lane_f64(from, 1);
}
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ARM_PREFETCH(addr); }
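// Typical use of the reinterpreted logical ops above (illustrative sketch;
// the function and constant names are assumptions, not part of the original
// file): clearing the sign bit yields |x|, the same bit trick the SSE
// backend uses for absolute values.
inline Packet2d example_abs_via_andnot(const Packet2d& x)
{
  // All-zero except the IEEE-754 sign bit of each lane.
  const Packet2d sign_mask = vreinterpretq_f64_u64(vdupq_n_u64(0x8000000000000000ULL));
  return pandnot<Packet2d>(x, sign_mask);  // x & ~sign_mask
}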
// FIXME only store the 2 first elements ?
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a, 0); }

template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }

template<size_t offset>
struct protate_impl<offset, Packet2d>
{
  static Packet2d run(const Packet2d& a) {
    return vextq_f64(a, a, offset);
  }
};

template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); }

#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
// workaround ICE, see bug 907
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) + vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
#endif

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  float64x2_t trn1, trn2;

  // NEON zip performs interleaving of the supplied vectors.
  // We perform two interleaves in a row to acquire the transposed vector.
  trn1 = vzip1q_f64(vecs[0], vecs[1]);
  trn2 = vzip2q_f64(vecs[0], vecs[1]);

  // Do the addition of the resulting vectors.
  return vaddq_f64(trn1, trn2);
}
// Other reduction functions:
// mul
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) * vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
#endif

// min
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpminq_f64(a, a), 0); }

// max
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpmaxq_f64(a, a), 0); }

// This PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing
// spurious compilation errors; see bug 347 and this LLVM bug:
// http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
    {\
        if (Offset!=0)\
            first = Command(first, second, Offset);\
    }\
};\

PALIGN_NEON(0,Packet2d,vextq_f64)
PALIGN_NEON(1,Packet2d,vextq_f64)
#undef PALIGN_NEON

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
  float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);

  kernel.packet[0] = trn1;
  kernel.packet[1] = trn2;
}
#endif // EIGEN_ARCH_ARM64

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_NEON_H