MOAB  4.9.3pre
Eigen::internal Namespace Reference

Namespaces

namespace  cephes

Classes

struct  ldlt_inplace< Lower >
struct  ldlt_inplace< Upper >
struct  LDLT_Traits< MatrixType, Lower >
struct  LDLT_Traits< MatrixType, Upper >
struct  llt_inplace< Scalar, Lower >
struct  llt_inplace< Scalar, Upper >
struct  LLT_Traits< MatrixType, Lower >
struct  LLT_Traits< MatrixType, Upper >
struct  Packet2cf
struct  packet_traits< std::complex< float > >
struct  unpacket_traits< Packet2cf >
struct  palign_impl< Offset, Packet2cf >
struct  conj_helper< Packet2cf, Packet2cf, false, true >
struct  conj_helper< Packet2cf, Packet2cf, true, false >
struct  conj_helper< Packet2cf, Packet2cf, true, true >
struct  packet_traits< float >
struct  packet_traits< int >
struct  unpacket_traits< Packet4f >
struct  unpacket_traits< Packet4i >
struct  palign_impl< Offset, Packet4f >
struct  palign_impl< Offset, Packet4i >
struct  Packet4cf
struct  unpacket_traits< Packet4cf >
struct  palign_impl< Offset, Packet4cf >
struct  conj_helper< Packet4cf, Packet4cf, false, true >
struct  conj_helper< Packet4cf, Packet4cf, true, false >
struct  conj_helper< Packet4cf, Packet4cf, true, true >
struct  conj_helper< Packet8f, Packet4cf, false, false >
struct  conj_helper< Packet4cf, Packet8f, false, false >
struct  Packet2cd
struct  packet_traits< std::complex< double > >
struct  unpacket_traits< Packet2cd >
struct  palign_impl< Offset, Packet2cd >
struct  conj_helper< Packet2cd, Packet2cd, false, true >
struct  conj_helper< Packet2cd, Packet2cd, true, false >
struct  conj_helper< Packet2cd, Packet2cd, true, true >
struct  conj_helper< Packet4d, Packet2cd, false, false >
struct  conj_helper< Packet2cd, Packet4d, false, false >
struct  is_arithmetic< __m256 >
struct  is_arithmetic< __m256i >
struct  is_arithmetic< __m256d >
struct  packet_traits< double >
struct  unpacket_traits< Packet8f >
struct  unpacket_traits< Packet4d >
struct  unpacket_traits< Packet8i >
struct  palign_impl< Offset, Packet8f >
struct  palign_impl< Offset, Packet4d >
struct  type_casting_traits< float, int >
struct  type_casting_traits< int, float >
struct  protate_impl< offset, Packet4f >
struct  protate_impl< offset, Packet4i >
struct  conj_helper< Packet4f, Packet2cf, false, false >
struct  conj_helper< Packet2cf, Packet4f, false, false >
struct  Packet1cd
struct  unpacket_traits< Packet1cd >
struct  palign_impl< Offset, Packet1cd >
struct  conj_helper< Packet1cd, Packet1cd, false, true >
struct  conj_helper< Packet1cd, Packet1cd, true, false >
struct  conj_helper< Packet1cd, Packet1cd, true, true >
struct  conj_helper< Packet2d, Packet1cd, false, false >
struct  conj_helper< Packet1cd, Packet2d, false, false >
struct  is_arithmetic< __m128 >
struct  is_arithmetic< __m128i >
struct  is_arithmetic< __m128d >
struct  unpacket_traits< Packet2d >
struct  protate_impl< offset, Packet2d >
struct  palign_impl< Offset, Packet2d >
struct  type_casting_traits< double, float >
struct  type_casting_traits< float, double >
struct  traits< Array< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols > >
struct  traits< ArrayWrapper< ExpressionType > >
struct  traits< MatrixWrapper< ExpressionType > >
class  vml_assign_traits
struct  copy_using_evaluator_traits
struct  copy_using_evaluator_DefaultTraversal_CompleteUnrolling
struct  copy_using_evaluator_DefaultTraversal_CompleteUnrolling< Kernel, Stop, Stop >
struct  copy_using_evaluator_DefaultTraversal_InnerUnrolling
struct  copy_using_evaluator_DefaultTraversal_InnerUnrolling< Kernel, Stop, Stop >
struct  copy_using_evaluator_LinearTraversal_CompleteUnrolling
struct  copy_using_evaluator_LinearTraversal_CompleteUnrolling< Kernel, Stop, Stop >
struct  copy_using_evaluator_innervec_CompleteUnrolling
struct  copy_using_evaluator_innervec_CompleteUnrolling< Kernel, Stop, Stop >
struct  copy_using_evaluator_innervec_InnerUnrolling
struct  copy_using_evaluator_innervec_InnerUnrolling< Kernel, Stop, Stop >
struct  dense_assignment_loop< Kernel, DefaultTraversal, NoUnrolling >
struct  dense_assignment_loop< Kernel, DefaultTraversal, CompleteUnrolling >
struct  dense_assignment_loop< Kernel, DefaultTraversal, InnerUnrolling >
struct  unaligned_dense_assignment_loop
struct  unaligned_dense_assignment_loop< false >
struct  dense_assignment_loop< Kernel, LinearVectorizedTraversal, NoUnrolling >
struct  dense_assignment_loop< Kernel, LinearVectorizedTraversal, CompleteUnrolling >
struct  dense_assignment_loop< Kernel, InnerVectorizedTraversal, NoUnrolling >
struct  dense_assignment_loop< Kernel, InnerVectorizedTraversal, CompleteUnrolling >
struct  dense_assignment_loop< Kernel, InnerVectorizedTraversal, InnerUnrolling >
struct  dense_assignment_loop< Kernel, LinearTraversal, NoUnrolling >
struct  dense_assignment_loop< Kernel, LinearTraversal, CompleteUnrolling >
struct  dense_assignment_loop< Kernel, SliceVectorizedTraversal, NoUnrolling >
class  generic_dense_assignment_kernel
struct  Dense2Dense
struct  EigenBase2EigenBase
struct  AssignmentKind
struct  AssignmentKind< DenseShape, DenseShape >
struct  Assignment< DstXprType, SrcXprType, Functor, Dense2Dense, Scalar >
struct  Assignment< DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Scalar >
class  BandMatrixBase
struct  traits< BandMatrix< _Scalar, _Rows, _Cols, _Supers, _Subs, _Options > >
class  BandMatrix
 Represents a rectangular matrix with a banded storage. More...
struct  traits< BandMatrixWrapper< _CoefficientsType, _Rows, _Cols, _Supers, _Subs, _Options > >
class  BandMatrixWrapper
class  TridiagonalMatrix
 Represents a tridiagonal matrix with a compact banded storage. More...
struct  BandShape
struct  evaluator_traits< BandMatrix< _Scalar, _Rows, _Cols, _Supers, _Subs, _Options > >
struct  evaluator_traits< BandMatrixWrapper< _CoefficientsType, _Rows, _Cols, _Supers, _Subs, _Options > >
struct  AssignmentKind< DenseShape, BandShape >
struct  traits< Block< XprType, BlockRows, BlockCols, InnerPanel > >
class  BlockImpl_dense
class  BlockImpl_dense< XprType, BlockRows, BlockCols, InnerPanel, true >
struct  all_unroller
struct  all_unroller< Derived, 0 >
struct  all_unroller< Derived, Dynamic >
struct  any_unroller
struct  any_unroller< Derived, 0 >
struct  any_unroller< Derived, Dynamic >
struct  storage_kind_to_evaluator_kind
struct  storage_kind_to_shape< Dense >
struct  storage_kind_to_shape< SolverStorage >
struct  storage_kind_to_shape< PermutationStorage >
struct  storage_kind_to_shape< TranspositionsStorage >
struct  evaluator_traits_base
struct  evaluator_traits
struct  evaluator_assume_aliasing
struct  evaluator
struct  evaluator< const T >
struct  evaluator_base
struct  evaluator< PlainObjectBase< Derived > >
struct  evaluator< Matrix< Scalar, Rows, Cols, Options, MaxRows, MaxCols > >
struct  evaluator< Array< Scalar, Rows, Cols, Options, MaxRows, MaxCols > >
struct  unary_evaluator< Transpose< ArgType >, IndexBased >
struct  evaluator< CwiseNullaryOp< NullaryOp, PlainObjectType > >
struct  unary_evaluator< CwiseUnaryOp< UnaryOp, ArgType >, IndexBased >
struct  evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs > >
struct  binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs >, IndexBased, IndexBased >
struct  unary_evaluator< CwiseUnaryView< UnaryOp, ArgType >, IndexBased >
struct  mapbase_evaluator
struct  evaluator< Map< PlainObjectType, MapOptions, StrideType > >
struct  evaluator< Ref< PlainObjectType, RefOptions, StrideType > >
struct  evaluator< Block< ArgType, BlockRows, BlockCols, InnerPanel > >
struct  block_evaluator< ArgType, BlockRows, BlockCols, InnerPanel, false >
struct  unary_evaluator< Block< ArgType, BlockRows, BlockCols, InnerPanel >, IndexBased >
struct  block_evaluator< ArgType, BlockRows, BlockCols, InnerPanel, true >
struct  evaluator< Select< ConditionMatrixType, ThenMatrixType, ElseMatrixType > >
struct  unary_evaluator< Replicate< ArgType, RowFactor, ColFactor > >
struct  evaluator< PartialReduxExpr< ArgType, MemberOp, Direction > >
struct  evaluator_wrapper_base
struct  unary_evaluator< MatrixWrapper< TArgType > >
struct  unary_evaluator< ArrayWrapper< TArgType > >
struct  unary_evaluator< Reverse< ArgType, Direction > >
struct  evaluator< Diagonal< ArgType, DiagIndex > >
struct  traits< EvalToTemp< ArgType > >
class  EvalToTemp
struct  evaluator< EvalToTemp< ArgType > >
class  inner_iterator_selector< XprType, IndexBased >
class  inner_iterator_selector< XprType, IteratorBased >
struct  traits< CwiseBinaryOp< BinaryOp, Lhs, Rhs > >
struct  traits< CwiseNullaryOp< NullaryOp, PlainObjectType > >
struct  setIdentity_impl
struct  setIdentity_impl< Derived, true >
struct  traits< CwiseUnaryOp< UnaryOp, XprType > >
struct  traits< CwiseUnaryView< ViewOp, MatrixType > >
struct  add_const_on_value_type_if_arithmetic
struct  first_aligned_impl
struct  first_aligned_impl< Alignment, Derived, false >
struct  inner_stride_at_compile_time
struct  inner_stride_at_compile_time< Derived, false >
struct  outer_stride_at_compile_time
struct  outer_stride_at_compile_time< Derived, false >
struct  constructor_without_unaligned_array_assert
struct  plain_array
struct  plain_array< T, Size, MatrixOrArrayOptions, 8 >
struct  plain_array< T, Size, MatrixOrArrayOptions, 16 >
struct  plain_array< T, Size, MatrixOrArrayOptions, 32 >
struct  plain_array< T, Size, MatrixOrArrayOptions, 64 >
struct  plain_array< T, 0, MatrixOrArrayOptions, Alignment >
struct  traits< Diagonal< MatrixType, DiagIndex > >
struct  traits< DiagonalMatrix< _Scalar, SizeAtCompileTime, MaxSizeAtCompileTime > >
struct  traits< DiagonalWrapper< _DiagonalVectorType > >
struct  storage_kind_to_shape< DiagonalShape >
struct  Diagonal2Dense
struct  AssignmentKind< DenseShape, DiagonalShape >
struct  Assignment< DstXprType, SrcXprType, Functor, Diagonal2Dense, Scalar >
struct  dot_nocheck
struct  dot_nocheck< T, U, true >
struct  lpNorm_selector
struct  lpNorm_selector< Derived, 1 >
struct  lpNorm_selector< Derived, 2 >
struct  lpNorm_selector< Derived, Infinity >
struct  traits< ForceAlignedAccess< ExpressionType > >
struct  assign_op
 Template functor for scalar/packet assignment. More...
struct  functor_traits< assign_op< Scalar > >
struct  add_assign_op
 Template functor for scalar/packet assignment with addition. More...
struct  functor_traits< add_assign_op< Scalar > >
struct  sub_assign_op
 Template functor for scalar/packet assignment with subtraction. More...
struct  functor_traits< sub_assign_op< Scalar > >
struct  mul_assign_op
 Template functor for scalar/packet assignment with multiplication. More...
struct  functor_traits< mul_assign_op< DstScalar, SrcScalar > >
struct  functor_is_product_like< mul_assign_op< DstScalar, SrcScalar > >
struct  div_assign_op
 Template functor for scalar/packet assignment with dividing. More...
struct  functor_traits< div_assign_op< Scalar > >
struct  swap_assign_op
 Template functor for scalar/packet assignment with swapping. More...
struct  functor_traits< swap_assign_op< Scalar > >
struct  scalar_sum_op
 Template functor to compute the sum of two scalars. More...
struct  functor_traits< scalar_sum_op< Scalar > >
struct  scalar_sum_op< bool >
 Template specialization to deprecate the summation of boolean expressions. This is required to solve Bug 426. More...
struct  scalar_product_op
 Template functor to compute the product of two scalars. More...
struct  functor_traits< scalar_product_op< LhsScalar, RhsScalar > >
struct  scalar_conj_product_op
 Template functor to compute the conjugate product of two scalars. More...
struct  functor_traits< scalar_conj_product_op< LhsScalar, RhsScalar > >
struct  scalar_min_op
 Template functor to compute the min of two scalars. More...
struct  functor_traits< scalar_min_op< Scalar > >
struct  scalar_max_op
 Template functor to compute the max of two scalars. More...
struct  functor_traits< scalar_max_op< Scalar > >
struct  functor_traits< scalar_cmp_op< Scalar, cmp > >
struct  result_of< scalar_cmp_op< Scalar, Cmp >(Scalar, Scalar)>
struct  scalar_cmp_op< Scalar, cmp_EQ >
struct  scalar_cmp_op< Scalar, cmp_LT >
struct  scalar_cmp_op< Scalar, cmp_LE >
struct  scalar_cmp_op< Scalar, cmp_GT >
struct  scalar_cmp_op< Scalar, cmp_GE >
struct  scalar_cmp_op< Scalar, cmp_UNORD >
struct  scalar_cmp_op< Scalar, cmp_NEQ >
struct  scalar_hypot_op
 Template functor to compute the hypot of two scalars. More...
struct  functor_traits< scalar_hypot_op< Scalar > >
struct  scalar_binary_pow_op
 Template functor to compute the pow of two scalars. More...
struct  functor_traits< scalar_binary_pow_op< Scalar, OtherScalar > >
struct  scalar_difference_op
 Template functor to compute the difference of two scalars. More...
struct  functor_traits< scalar_difference_op< Scalar > >
struct  scalar_quotient_op
 Template functor to compute the quotient of two scalars. More...
struct  functor_traits< scalar_quotient_op< LhsScalar, RhsScalar > >
struct  scalar_boolean_and_op
 Template functor to compute the logical AND of two booleans. More...
struct  functor_traits< scalar_boolean_and_op >
struct  scalar_boolean_or_op
 Template functor to compute the logical OR of two booleans. More...
struct  functor_traits< scalar_boolean_or_op >
struct  scalar_multiple_op
 Template functor to multiply a scalar by a fixed other one. More...
struct  functor_traits< scalar_multiple_op< Scalar > >
struct  scalar_multiple2_op
struct  functor_traits< scalar_multiple2_op< Scalar1, Scalar2 > >
struct  scalar_quotient1_op
 Template functor to divide a scalar by a fixed other one. More...
struct  functor_traits< scalar_quotient1_op< Scalar > >
struct  scalar_quotient2_op
struct  functor_traits< scalar_quotient2_op< Scalar1, Scalar2 > >
struct  functor_is_product_like
struct  functor_is_product_like< scalar_product_op< LhsScalar, RhsScalar > >
struct  functor_is_product_like< scalar_conj_product_op< LhsScalar, RhsScalar > >
struct  functor_is_product_like< scalar_quotient_op< LhsScalar, RhsScalar > >
struct  scalar_add_op
 Template functor to add a scalar to a fixed other one. More...
struct  functor_traits< scalar_add_op< Scalar > >
struct  scalar_sub_op
 Template functor to subtract a fixed scalar from another one. More...
struct  functor_traits< scalar_sub_op< Scalar > >
struct  scalar_rsub_op
 Template functor to subtract a scalar from a fixed one. More...
struct  functor_traits< scalar_rsub_op< Scalar > >
struct  scalar_pow_op
 Template functor to raise a scalar to a power. More...
struct  functor_traits< scalar_pow_op< Scalar > >
struct  scalar_inverse_mult_op
 Template functor to compute the quotient between a scalar and array entries. More...
struct  scalar_constant_op
struct  functor_traits< scalar_constant_op< Scalar > >
struct  scalar_identity_op
struct  functor_traits< scalar_identity_op< Scalar > >
struct  linspaced_op_impl< Scalar, Packet, false, false >
struct  linspaced_op_impl< Scalar, Packet, true, false >
struct  linspaced_op_impl< Scalar, Packet, true, true >
struct  functor_traits< linspaced_op< Scalar, PacketType, RandomAccess > >
struct  linspaced_op
struct  functor_has_linear_access
struct  functor_has_linear_access< scalar_identity_op< Scalar > >
struct  functor_traits< std::multiplies< T > >
struct  functor_traits< std::divides< T > >
struct  functor_traits< std::plus< T > >
struct  functor_traits< std::minus< T > >
struct  functor_traits< std::negate< T > >
struct  functor_traits< std::logical_or< T > >
struct  functor_traits< std::logical_and< T > >
struct  functor_traits< std::logical_not< T > >
struct  functor_traits< std::greater< T > >
struct  functor_traits< std::less< T > >
struct  functor_traits< std::greater_equal< T > >
struct  functor_traits< std::less_equal< T > >
struct  functor_traits< std::equal_to< T > >
struct  functor_traits< std::not_equal_to< T > >
struct  functor_traits< std::binder2nd< T > >
struct  functor_traits< std::binder1st< T > >
struct  functor_traits< std::unary_negate< T > >
struct  functor_traits< std::binary_negate< T > >
struct  scalar_opposite_op
 Template functor to compute the opposite of a scalar. More...
struct  functor_traits< scalar_opposite_op< Scalar > >
struct  scalar_abs_op
 Template functor to compute the absolute value of a scalar. More...
struct  functor_traits< scalar_abs_op< Scalar > >
struct  scalar_score_coeff_op
 Template functor to compute the score of a scalar, to choose a pivot. More...
struct  functor_traits< scalar_score_coeff_op< Scalar > >
struct  abs_knowing_score
struct  abs_knowing_score< Scalar, typename scalar_score_coeff_op< Scalar >::Score_is_abs >
struct  scalar_abs2_op
 Template functor to compute the squared absolute value of a scalar. More...
struct  functor_traits< scalar_abs2_op< Scalar > >
struct  scalar_conjugate_op
 Template functor to compute the conjugate of a complex value. More...
struct  functor_traits< scalar_conjugate_op< Scalar > >
struct  scalar_arg_op
 Template functor to compute the phase angle of a complex. More...
struct  functor_traits< scalar_arg_op< Scalar > >
struct  scalar_cast_op
 Template functor to cast a scalar to another type. More...
struct  functor_traits< scalar_cast_op< Scalar, NewType > >
struct  scalar_real_op
 Template functor to extract the real part of a complex. More...
struct  functor_traits< scalar_real_op< Scalar > >
struct  scalar_imag_op
 Template functor to extract the imaginary part of a complex. More...
struct  functor_traits< scalar_imag_op< Scalar > >
struct  scalar_real_ref_op
 Template functor to extract the real part of a complex as a reference. More...
struct  functor_traits< scalar_real_ref_op< Scalar > >
struct  scalar_imag_ref_op
 Template functor to extract the imaginary part of a complex as a reference. More...
struct  functor_traits< scalar_imag_ref_op< Scalar > >
struct  scalar_exp_op
 Template functor to compute the exponential of a scalar. More...
struct  functor_traits< scalar_exp_op< Scalar > >
struct  scalar_log_op
 Template functor to compute the logarithm of a scalar. More...
struct  functor_traits< scalar_log_op< Scalar > >
struct  scalar_log10_op
 Template functor to compute the base-10 logarithm of a scalar. More...
struct  functor_traits< scalar_log10_op< Scalar > >
struct  scalar_sqrt_op
 Template functor to compute the square root of a scalar. More...
struct  functor_traits< scalar_sqrt_op< Scalar > >
struct  scalar_rsqrt_op
 Template functor to compute the reciprocal square root of a scalar. More...
struct  functor_traits< scalar_rsqrt_op< Scalar > >
struct  scalar_cos_op
 Template functor to compute the cosine of a scalar. More...
struct  functor_traits< scalar_cos_op< Scalar > >
struct  scalar_sin_op
 Template functor to compute the sine of a scalar. More...
struct  functor_traits< scalar_sin_op< Scalar > >
struct  scalar_tan_op
 Template functor to compute the tan of a scalar. More...
struct  functor_traits< scalar_tan_op< Scalar > >
struct  scalar_acos_op
 Template functor to compute the arc cosine of a scalar. More...
struct  functor_traits< scalar_acos_op< Scalar > >
struct  scalar_asin_op
 Template functor to compute the arc sine of a scalar. More...
struct  functor_traits< scalar_asin_op< Scalar > >
struct  scalar_lgamma_op
 Template functor to compute the natural log of the absolute value of Gamma of a scalar. More...
struct  functor_traits< scalar_lgamma_op< Scalar > >
struct  scalar_digamma_op
 Template functor to compute psi, the derivative of lgamma of a scalar. More...
struct  functor_traits< scalar_digamma_op< Scalar > >
struct  scalar_erf_op
 Template functor to compute the Gauss error function of a scalar. More...
struct  functor_traits< scalar_erf_op< Scalar > >
struct  scalar_erfc_op
 Template functor to compute the Complementary Error Function of a scalar. More...
struct  functor_traits< scalar_erfc_op< Scalar > >
struct  scalar_atan_op
 Template functor to compute the atan of a scalar. More...
struct  functor_traits< scalar_atan_op< Scalar > >
struct  scalar_tanh_op
 Template functor to compute the tanh of a scalar. More...
struct  functor_traits< scalar_tanh_op< Scalar > >
struct  scalar_sinh_op
 Template functor to compute the sinh of a scalar. More...
struct  functor_traits< scalar_sinh_op< Scalar > >
struct  scalar_cosh_op
 Template functor to compute the cosh of a scalar. More...
struct  functor_traits< scalar_cosh_op< Scalar > >
struct  scalar_inverse_op
 Template functor to compute the inverse of a scalar. More...
struct  functor_traits< scalar_inverse_op< Scalar > >
struct  scalar_square_op
 Template functor to compute the square of a scalar. More...
struct  functor_traits< scalar_square_op< Scalar > >
struct  scalar_cube_op
 Template functor to compute the cube of a scalar. More...
struct  functor_traits< scalar_cube_op< Scalar > >
struct  scalar_round_op
 Template functor to compute the rounded value of a scalar. More...
struct  functor_traits< scalar_round_op< Scalar > >
struct  scalar_floor_op
 Template functor to compute the floor of a scalar. More...
struct  functor_traits< scalar_floor_op< Scalar > >
struct  scalar_ceil_op
 Template functor to compute the ceil of a scalar. More...
struct  functor_traits< scalar_ceil_op< Scalar > >
struct  scalar_isnan_op
 Template functor to compute whether a scalar is NaN. More...
struct  functor_traits< scalar_isnan_op< Scalar > >
struct  scalar_isinf_op
 Template functor to check whether a scalar is +/-inf. More...
struct  functor_traits< scalar_isinf_op< Scalar > >
struct  scalar_isfinite_op
 Template functor to check whether a scalar has a finite value. More...
struct  functor_traits< scalar_isfinite_op< Scalar > >
struct  scalar_boolean_not_op
 Template functor to compute the logical not of a boolean. More...
struct  functor_traits< scalar_boolean_not_op< Scalar > >
struct  scalar_sign_op< Scalar, false >
struct  scalar_sign_op< Scalar, true >
struct  functor_traits< scalar_sign_op< Scalar > >
struct  isApprox_selector
struct  isApprox_selector< Derived, OtherDerived, true >
struct  isMuchSmallerThan_object_selector
struct  isMuchSmallerThan_object_selector< Derived, OtherDerived, true >
struct  isMuchSmallerThan_scalar_selector
struct  isMuchSmallerThan_scalar_selector< Derived, true >
struct  product_size_category
struct  product_type
struct  product_type_selector< M, N, 1 >
struct  product_type_selector< 1, 1, Depth >
struct  product_type_selector< 1, 1, 1 >
struct  product_type_selector< Small, 1, Small >
struct  product_type_selector< 1, Small, Small >
struct  product_type_selector< Small, Small, Small >
struct  product_type_selector< Small, Small, 1 >
struct  product_type_selector< Small, Large, 1 >
struct  product_type_selector< Large, Small, 1 >
struct  product_type_selector< 1, Large, Small >
struct  product_type_selector< 1, Large, Large >
struct  product_type_selector< 1, Small, Large >
struct  product_type_selector< Large, 1, Small >
struct  product_type_selector< Large, 1, Large >
struct  product_type_selector< Small, 1, Large >
struct  product_type_selector< Small, Small, Large >
struct  product_type_selector< Large, Small, Large >
struct  product_type_selector< Small, Large, Large >
struct  product_type_selector< Large, Large, Large >
struct  product_type_selector< Large, Small, Small >
struct  product_type_selector< Small, Large, Small >
struct  product_type_selector< Large, Large, Small >
struct  gemv_static_vector_if< Scalar, Size, MaxSize, false >
struct  gemv_static_vector_if< Scalar, Size, Dynamic, true >
struct  gemv_static_vector_if< Scalar, Size, MaxSize, true >
struct  gemv_dense_selector< OnTheLeft, StorageOrder, BlasCompatible >
struct  gemv_dense_selector< OnTheRight, ColMajor, true >
struct  gemv_dense_selector< OnTheRight, RowMajor, true >
struct  gemv_dense_selector< OnTheRight, ColMajor, false >
struct  gemv_dense_selector< OnTheRight, RowMajor, false >
struct  default_packet_traits
struct  packet_traits
struct  packet_traits< const T >
struct  type_casting_traits
struct  protate_impl
struct  palign_impl
struct  PacketBlock
struct  Selector
struct  traits< Inverse< XprType > >
struct  unary_evaluator< Inverse< ArgType > >
 Default evaluator for Inverse expression. More...
struct  significant_decimals_default_impl
struct  significant_decimals_default_impl< Scalar, true >
struct  significant_decimals_impl
struct  traits< Map< PlainObjectType, MapOptions, StrideType > >
struct  traits< Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols > >
struct  traits< NestByValue< ExpressionType > >
struct  traits< PermutationMatrix< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex > >
struct  traits< Map< PermutationMatrix< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex >, _PacketAccess > >
struct  traits< PermutationWrapper< _IndicesType > >
struct  AssignmentKind< DenseShape, PermutationShape >
struct  check_rows_cols_for_overflow
struct  check_rows_cols_for_overflow< Dynamic >
struct  conservative_resize_like_impl
struct  conservative_resize_like_impl< Derived, OtherDerived, true >
struct  matrix_swap_impl
struct  matrix_swap_impl< MatrixTypeA, MatrixTypeB, true >
struct  product_result_scalar
struct  product_result_scalar< Lhs, Rhs, PermutationShape, RhsShape >
struct  product_result_scalar< Lhs, Rhs, LhsShape, PermutationShape >
struct  product_result_scalar< Lhs, Rhs, TranspositionsShape, RhsShape >
struct  product_result_scalar< Lhs, Rhs, LhsShape, TranspositionsShape >
struct  traits< Product< Lhs, Rhs, Option > >
class  dense_product_base
class  dense_product_base< Lhs, Rhs, Option, InnerProduct >
struct  evaluator< Product< Lhs, Rhs, Options > >
struct  evaluator_assume_aliasing< CwiseUnaryOp< internal::scalar_multiple_op< Scalar >, const Product< Lhs, Rhs, DefaultProduct > > >
struct  evaluator< CwiseUnaryOp< internal::scalar_multiple_op< Scalar >, const Product< Lhs, Rhs, DefaultProduct > > >
struct  evaluator< Diagonal< const Product< Lhs, Rhs, DefaultProduct >, DiagIndex > >
struct  evaluator_assume_aliasing< Product< Lhs, Rhs, DefaultProduct > >
struct  product_evaluator< Product< Lhs, Rhs, Options >, ProductTag, LhsShape, RhsShape >
struct  Assignment< DstXprType, Product< Lhs, Rhs, Options >, internal::assign_op< Scalar >, Dense2Dense, typename enable_if<(Options==DefaultProduct||Options==AliasFreeProduct), Scalar >::type >
struct  Assignment< DstXprType, Product< Lhs, Rhs, Options >, internal::add_assign_op< Scalar >, Dense2Dense, typename enable_if<(Options==DefaultProduct||Options==AliasFreeProduct), Scalar >::type >
struct  Assignment< DstXprType, Product< Lhs, Rhs, Options >, internal::sub_assign_op< Scalar >, Dense2Dense, typename enable_if<(Options==DefaultProduct||Options==AliasFreeProduct), Scalar >::type >
struct  Assignment< DstXprType, CwiseUnaryOp< internal::scalar_multiple_op< ScalarBis >, const Product< Lhs, Rhs, DefaultProduct > >, AssignFunc, Dense2Dense, Scalar >
struct  evaluator_assume_aliasing< CwiseBinaryOp< internal::scalar_sum_op< typename OtherXpr::Scalar >, const OtherXpr, const Product< Lhs, Rhs, DefaultProduct > >, DenseShape >
struct  assignment_from_xpr_plus_product
struct  Assignment< DstXprType, CwiseBinaryOp< internal::scalar_sum_op< Scalar >, const OtherXpr, const Product< Lhs, Rhs, DefaultProduct > >, internal::assign_op< Scalar >, Dense2Dense >
struct  Assignment< DstXprType, CwiseBinaryOp< internal::scalar_sum_op< Scalar >, const OtherXpr, const Product< Lhs, Rhs, DefaultProduct > >, internal::add_assign_op< Scalar >, Dense2Dense >
struct  Assignment< DstXprType, CwiseBinaryOp< internal::scalar_sum_op< Scalar >, const OtherXpr, const Product< Lhs, Rhs, DefaultProduct > >, internal::sub_assign_op< Scalar >, Dense2Dense >
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, InnerProduct >
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, OuterProduct >
struct  generic_product_impl_base
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, GemvProduct >
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode >
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, LazyCoeffBasedProductMode >
struct  product_evaluator< Product< Lhs, Rhs, LazyProduct >, ProductTag, DenseShape, DenseShape >
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, LazyCoeffBasedProductMode, DenseShape, DenseShape >
struct  etor_product_packet_impl< RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode >
struct  etor_product_packet_impl< ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode >
struct  etor_product_packet_impl< RowMajor, 1, Lhs, Rhs, Packet, LoadMode >
struct  etor_product_packet_impl< ColMajor, 1, Lhs, Rhs, Packet, LoadMode >
struct  etor_product_packet_impl< RowMajor, 0, Lhs, Rhs, Packet, LoadMode >
struct  etor_product_packet_impl< ColMajor, 0, Lhs, Rhs, Packet, LoadMode >
struct  etor_product_packet_impl< RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode >
struct  etor_product_packet_impl< ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode >
struct  generic_product_impl< Lhs, Rhs, TriangularShape, DenseShape, ProductTag >
struct  generic_product_impl< Lhs, Rhs, DenseShape, TriangularShape, ProductTag >
struct  generic_product_impl< Lhs, Rhs, SelfAdjointShape, DenseShape, ProductTag >
struct  generic_product_impl< Lhs, Rhs, DenseShape, SelfAdjointShape, ProductTag >
struct  diagonal_product_evaluator_base
struct  product_evaluator< Product< Lhs, Rhs, ProductKind >, ProductTag, DiagonalShape, DenseShape >
struct  product_evaluator< Product< Lhs, Rhs, ProductKind >, ProductTag, DenseShape, DiagonalShape >
struct  permutation_matrix_product< ExpressionType, Side, Transposed, DenseShape >
struct  generic_product_impl< Lhs, Rhs, PermutationShape, MatrixShape, ProductTag >
struct  generic_product_impl< Lhs, Rhs, MatrixShape, PermutationShape, ProductTag >
struct  generic_product_impl< Inverse< Lhs >, Rhs, PermutationShape, MatrixShape, ProductTag >
struct  generic_product_impl< Lhs, Inverse< Rhs >, MatrixShape, PermutationShape, ProductTag >
class  transposition_matrix_product
struct  generic_product_impl< Lhs, Rhs, TranspositionsShape, MatrixShape, ProductTag >
struct  generic_product_impl< Lhs, Rhs, MatrixShape, TranspositionsShape, ProductTag >
struct  generic_product_impl< Transpose< Lhs >, Rhs, TranspositionsShape, MatrixShape, ProductTag >
struct  generic_product_impl< Lhs, Transpose< Rhs >, MatrixShape, TranspositionsShape, ProductTag >
struct  CacheSizes
struct  gebp_madd_selector
struct  gebp_madd_selector< CJ, T, T, T, T >
class  gebp_traits
class  gebp_traits< std::complex< RealScalar >, RealScalar, _ConjLhs, false >
struct  DoublePacket
struct  unpacket_traits< DoublePacket< Packet > >
class  gebp_traits< std::complex< RealScalar >, std::complex< RealScalar >, _ConjLhs, _ConjRhs >
class  gebp_traits< RealScalar, std::complex< RealScalar >, false, _ConjRhs >
struct  PossiblyRotatingKernelHelper
struct  PossiblyRotatingKernelHelper< GebpKernel, true >
struct  gebp_kernel
struct  gemm_pack_lhs< Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode >
struct  gemm_pack_lhs< Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode >
struct  gemm_pack_rhs< Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode >
struct  gemm_pack_rhs< Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode >
struct  general_matrix_matrix_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, RowMajor >
struct  general_matrix_matrix_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ColMajor >
struct  gemm_functor
class  level3_blocking
class  gemm_blocking_space< StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, true >
class  gemm_blocking_space< StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, false >
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, GemmProduct >
struct  general_matrix_matrix_triangular_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, RowMajor, UpLo, Version >
struct  general_matrix_matrix_triangular_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ColMajor, UpLo, Version >
struct  tribb_kernel
struct  general_matrix_matrix_rankupdate
struct  general_matrix_vector_product< Index, LhsScalar, LhsMapper, ColMajor, ConjugateLhs, RhsScalar, RhsMapper, ConjugateRhs, Version >
struct  general_matrix_vector_product< Index, LhsScalar, LhsMapper, RowMajor, ConjugateLhs, RhsScalar, RhsMapper, ConjugateRhs, Version >
struct  GemmParallelInfo
struct  symm_pack_lhs
struct  symm_pack_rhs
struct  product_selfadjoint_matrix< Scalar, Index, LhsStorageOrder, LhsSelfAdjoint, ConjugateLhs, RhsStorageOrder, RhsSelfAdjoint, ConjugateRhs, RowMajor >
struct  product_selfadjoint_matrix< Scalar, Index, LhsStorageOrder, true, ConjugateLhs, RhsStorageOrder, false, ConjugateRhs, ColMajor >
struct  product_selfadjoint_matrix< Scalar, Index, LhsStorageOrder, false, ConjugateLhs, RhsStorageOrder, true, ConjugateRhs, ColMajor >
struct  selfadjoint_product_impl< Lhs, LhsMode, false, Rhs, RhsMode, false >
struct  selfadjoint_matrix_vector_product
struct  selfadjoint_product_impl< Lhs, LhsMode, false, Rhs, 0, true >
struct  selfadjoint_product_impl< Lhs, 0, true, Rhs, RhsMode, false >
struct  selfadjoint_matrix_vector_product_symv
struct  selfadjoint_rank2_update_selector< Scalar, Index, UType, VType, Lower >
struct  selfadjoint_rank2_update_selector< Scalar, Index, UType, VType, Upper >
struct  conj_expr_if
struct  product_triangular_matrix_matrix< Scalar, Index, Mode, LhsIsTriangular, LhsStorageOrder, ConjugateLhs, RhsStorageOrder, ConjugateRhs, RowMajor, Version >
struct  product_triangular_matrix_matrix< Scalar, Index, Mode, true, LhsStorageOrder, ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, Version >
struct  product_triangular_matrix_matrix< Scalar, Index, Mode, false, LhsStorageOrder, ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, Version >
struct  triangular_product_impl< Mode, LhsIsTriangular, Lhs, false, Rhs, false >
struct  product_triangular_matrix_matrix_trmm
struct  triangular_matrix_vector_product< Index, Mode, LhsScalar, ConjLhs, RhsScalar, ConjRhs, ColMajor, Version >
struct  triangular_matrix_vector_product< Index, Mode, LhsScalar, ConjLhs, RhsScalar, ConjRhs, RowMajor, Version >
struct  triangular_product_impl< Mode, true, Lhs, false, Rhs, true >
struct  triangular_product_impl< Mode, false, Lhs, true, Rhs, false >
struct  trmv_selector< Mode, ColMajor >
struct  trmv_selector< Mode, RowMajor >
struct  triangular_matrix_vector_product_trmv
struct  triangular_solve_matrix< Scalar, Index, Side, Mode, Conjugate, TriStorageOrder, RowMajor >
struct  triangular_solve_matrix< Scalar, Index, OnTheLeft, Mode, Conjugate, TriStorageOrder, ColMajor >
struct  triangular_solve_matrix< Scalar, Index, OnTheRight, Mode, Conjugate, TriStorageOrder, ColMajor >
struct  triangular_solve_vector< LhsScalar, RhsScalar, Index, OnTheRight, Mode, Conjugate, StorageOrder >
struct  triangular_solve_vector< LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, RowMajor >
struct  triangular_solve_vector< LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, ColMajor >
struct  scalar_random_op
struct  functor_traits< scalar_random_op< Scalar > >
struct  redux_traits
struct  redux_novec_unroller
struct  redux_novec_unroller< Func, Derived, Start, 1 >
struct  redux_novec_unroller< Func, Derived, Start, 0 >
struct  redux_vec_unroller
struct  redux_vec_unroller< Func, Derived, Start, 1 >
struct  redux_impl< Func, Derived, DefaultTraversal, NoUnrolling >
struct  redux_impl< Func, Derived, DefaultTraversal, CompleteUnrolling >
struct  redux_impl< Func, Derived, LinearVectorizedTraversal, NoUnrolling >
struct  redux_impl< Func, Derived, SliceVectorizedTraversal, Unrolling >
struct  redux_impl< Func, Derived, LinearVectorizedTraversal, CompleteUnrolling >
class  redux_evaluator
struct  traits< Ref< _PlainObjectType, _Options, _StrideType > >
struct  traits< RefBase< Derived > >
struct  traits< Replicate< MatrixType, RowFactor, ColFactor > >
struct  traits< ReturnByValue< Derived > >
struct  nested_eval< ReturnByValue< Derived >, n, PlainObject >
struct  evaluator< ReturnByValue< Derived > >
struct  traits< Reverse< MatrixType, Direction > >
struct  reverse_packet_cond
struct  reverse_packet_cond< PacketType, false >
struct  vectorwise_reverse_inplace_impl< Vertical >
struct  vectorwise_reverse_inplace_impl< Horizontal >
struct  traits< Select< ConditionMatrixType, ThenMatrixType, ElseMatrixType > >
struct  traits< SelfAdjointView< MatrixType, UpLo > >
struct  evaluator_traits< SelfAdjointView< MatrixType, Mode > >
class  triangular_dense_assignment_kernel< UpLo, SelfAdjoint, SetOpposite, DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version >
struct  solve_traits< Decomposition, RhsType, Dense >
struct  traits< Solve< Decomposition, RhsType > >
struct  evaluator< Solve< Decomposition, RhsType > >
struct  Assignment< DstXprType, Solve< DecType, RhsType >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  Assignment< DstXprType, Solve< Transpose< const DecType >, RhsType >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  Assignment< DstXprType, Solve< CwiseUnaryOp< internal::scalar_conjugate_op< typename DecType::Scalar >, const Transpose< const DecType > >, RhsType >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  generic_xpr_base< Derived, MatrixXpr, SolverStorage >
class  trsolve_traits
struct  triangular_solver_selector< Lhs, Rhs, Side, Mode, NoUnrolling, 1 >
struct  triangular_solver_selector< Lhs, Rhs, Side, Mode, NoUnrolling, Dynamic >
struct  triangular_solver_unroller< Lhs, Rhs, Mode, LoopIndex, Size, false >
struct  triangular_solver_unroller< Lhs, Rhs, Mode, LoopIndex, Size, true >
struct  triangular_solver_selector< Lhs, Rhs, OnTheLeft, Mode, CompleteUnrolling, 1 >
struct  triangular_solver_selector< Lhs, Rhs, OnTheRight, Mode, CompleteUnrolling, 1 >
struct  traits< triangular_solve_retval< Side, TriangularType, Rhs > >
struct  triangular_solve_retval
struct  lgamma_impl
struct  lgamma_retval
struct  digamma_retval
struct  digamma_impl
struct  erf_impl
struct  erf_retval
struct  erfc_impl
struct  erfc_retval
class  generic_dense_assignment_kernel< DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op< typename DstEvaluatorTypeT::Scalar >, Specialized >
struct  traits< Transpose< MatrixType > >
struct  TransposeImpl_base
struct  TransposeImpl_base< MatrixType, false >
struct  inplace_transpose_selector< MatrixType, true, false >
struct  inplace_transpose_selector< MatrixType, true, true >
struct  inplace_transpose_selector< MatrixType, false, MatchPacketSize >
struct  check_transpose_aliasing_compile_time_selector
struct  check_transpose_aliasing_compile_time_selector< DestIsTransposed, CwiseBinaryOp< BinOp, DerivedA, DerivedB > >
struct  check_transpose_aliasing_run_time_selector
struct  check_transpose_aliasing_run_time_selector< Scalar, DestIsTransposed, CwiseBinaryOp< BinOp, DerivedA, DerivedB > >
struct  checkTransposeAliasing_impl
struct  checkTransposeAliasing_impl< Derived, OtherDerived, false >
struct  traits< Transpositions< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex > >
struct  traits< Map< Transpositions< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex >, _PacketAccess > >
struct  traits< TranspositionsWrapper< _IndicesType > >
struct  traits< Transpose< TranspositionsBase< Derived > > >
struct  traits< TriangularView< MatrixType, _Mode > >
struct  evaluator_traits< TriangularView< MatrixType, Mode > >
struct  unary_evaluator< TriangularView< MatrixType, Mode >, IndexBased >
struct  Triangular2Triangular
struct  Triangular2Dense
struct  Dense2Triangular
class  triangular_dense_assignment_kernel
struct  AssignmentKind< TriangularShape, TriangularShape >
struct  AssignmentKind< DenseShape, TriangularShape >
struct  AssignmentKind< TriangularShape, DenseShape >
struct  Assignment< DstXprType, SrcXprType, Functor, Triangular2Triangular, Scalar >
struct  Assignment< DstXprType, SrcXprType, Functor, Triangular2Dense, Scalar >
struct  Assignment< DstXprType, SrcXprType, Functor, Dense2Triangular, Scalar >
struct  triangular_assignment_loop
struct  triangular_assignment_loop< Kernel, Mode, 0, SetOpposite >
struct  triangular_assignment_loop< Kernel, Mode, Dynamic, SetOpposite >
struct  Assignment< DstXprType, Product< Lhs, Rhs, DefaultProduct >, internal::assign_op< Scalar >, Dense2Triangular, Scalar >
struct  Assignment< DstXprType, Product< Lhs, Rhs, DefaultProduct >, internal::add_assign_op< Scalar >, Dense2Triangular, Scalar >
struct  Assignment< DstXprType, Product< Lhs, Rhs, DefaultProduct >, internal::sub_assign_op< Scalar >, Dense2Triangular, Scalar >
struct  conj_if< true >
struct  conj_if< false >
struct  conj_helper< Scalar, Scalar, false, false >
struct  conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, false, true >
struct  conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, true, false >
struct  conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, true, true >
struct  conj_helper< std::complex< RealScalar >, RealScalar, Conj, false >
struct  conj_helper< RealScalar, std::complex< RealScalar >, false, Conj >
struct  get_factor
struct  get_factor< Scalar, typename NumTraits< Scalar >::Real >
class  BlasVectorMapper
class  BlasLinearMapper
class  blas_data_mapper
class  const_blas_data_mapper
struct  blas_traits
struct  blas_traits< CwiseUnaryOp< scalar_conjugate_op< Scalar >, NestedXpr > >
struct  blas_traits< CwiseUnaryOp< scalar_multiple_op< Scalar >, NestedXpr > >
struct  blas_traits< CwiseUnaryOp< scalar_opposite_op< Scalar >, NestedXpr > >
struct  blas_traits< Transpose< NestedXpr > >
struct  blas_traits< const T >
struct  extract_data_selector
struct  extract_data_selector< T, false >
struct  IndexBased
struct  IteratorBased
struct  traits< const T >
struct  has_direct_access
struct  accessors_level
struct  stem_function
struct  smart_copy_helper< T, true >
struct  smart_copy_helper< T, false >
struct  smart_memmove_helper< T, true >
struct  smart_memmove_helper< T, false >
class  aligned_stack_memory_handler
class  scoped_array
struct  true_type
struct  false_type
struct  conditional
struct  conditional< false, Then, Else >
struct  is_same
struct  is_same< T, T >
struct  remove_reference
struct  remove_reference< T & >
struct  remove_pointer
struct  remove_pointer< T * >
struct  remove_pointer< T *const >
struct  remove_const
struct  remove_const< const T >
struct  remove_const< const T[]>
struct  remove_const< const T[Size]>
struct  remove_all
struct  remove_all< const T >
struct  remove_all< T const & >
struct  remove_all< T & >
struct  remove_all< T const * >
struct  remove_all< T * >
struct  is_arithmetic
struct  is_arithmetic< float >
struct  is_arithmetic< double >
struct  is_arithmetic< long double >
struct  is_arithmetic< bool >
struct  is_arithmetic< char >
struct  is_arithmetic< signed char >
struct  is_arithmetic< unsigned char >
struct  is_arithmetic< signed short >
struct  is_arithmetic< unsigned short >
struct  is_arithmetic< signed int >
struct  is_arithmetic< unsigned int >
struct  is_arithmetic< signed long >
struct  is_arithmetic< unsigned long >
struct  is_integral
struct  is_integral< bool >
struct  is_integral< char >
struct  is_integral< signed char >
struct  is_integral< unsigned char >
struct  is_integral< signed short >
struct  is_integral< unsigned short >
struct  is_integral< signed int >
struct  is_integral< unsigned int >
struct  is_integral< signed long >
struct  is_integral< unsigned long >
struct  add_const
struct  add_const< T & >
struct  is_const
struct  is_const< T const >
struct  add_const_on_value_type
struct  add_const_on_value_type< T & >
struct  add_const_on_value_type< T * >
struct  add_const_on_value_type< T *const >
struct  add_const_on_value_type< T const *const >
struct  is_convertible_impl
struct  is_convertible
struct  enable_if< true, T >
class  noncopyable
struct  result_of
struct  has_none
struct  has_std_result_type
struct  has_tr1_result
struct  unary_result_of_select
struct  unary_result_of_select< Func, ArgType, sizeof(has_std_result_type)>
struct  unary_result_of_select< Func, ArgType, sizeof(has_tr1_result)>
struct  result_of< Func(ArgType)>
struct  binary_result_of_select
struct  binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
struct  binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
struct  result_of< Func(ArgType0, ArgType1)>
class  meta_sqrt
class  meta_sqrt< Y, InfX, SupX, true >
struct  meta_least_common_multiple
struct  meta_least_common_multiple< A, B, K, true >
struct  scalar_product_traits
struct  scalar_product_traits< T, T >
struct  scalar_product_traits< T, std::complex< T > >
struct  scalar_product_traits< std::complex< T >, T >
struct  static_assertion
struct  static_assertion< true >
class  no_assignment_operator
struct  promote_index_type
class  variable_if_dynamic
class  variable_if_dynamic< T, Dynamic >
class  variable_if_dynamicindex
class  variable_if_dynamicindex< T, DynamicIndex >
struct  functor_traits
struct  unpacket_traits
struct  find_best_packet_helper< Size, PacketType, true >
struct  find_best_packet_helper< Size, PacketType, false >
struct  find_best_packet
struct  compute_default_alignment_helper
struct  compute_default_alignment
struct  compute_default_alignment< T, Dynamic >
class  make_proper_matrix_type
class  compute_matrix_flags
struct  size_at_compile_time
struct  size_of_xpr_at_compile_time
struct  plain_matrix_type< T, Dense >
struct  plain_matrix_type< T, DiagonalShape >
struct  plain_matrix_type_dense< T, MatrixXpr, Flags >
struct  plain_matrix_type_dense< T, ArrayXpr, Flags >
struct  eval< T, Dense >
struct  eval< T, DiagonalShape >
struct  eval< Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >
struct  eval< Array< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >
struct  plain_object_eval< T, Dense >
struct  plain_matrix_type_column_major
struct  plain_matrix_type_row_major
struct  ref_selector
struct  transfer_constness
struct  nested_eval
struct  dense_xpr_base
struct  dense_xpr_base< Derived, MatrixXpr >
struct  dense_xpr_base< Derived, ArrayXpr >
struct  generic_xpr_base< Derived, XprKind, Dense >
struct  special_scalar_op_base
struct  special_scalar_op_base< Derived, Scalar, OtherScalar, BaseType, true >
struct  cast_return_type
struct  promote_storage_type< A, A >
struct  promote_storage_type< A, const A >
struct  promote_storage_type< const A, A >
struct  cwise_promote_storage_type< A, A, Functor >
struct  cwise_promote_storage_type< Dense, Dense, Functor >
struct  cwise_promote_storage_type< A, Dense, Functor >
struct  cwise_promote_storage_type< Dense, B, Functor >
struct  cwise_promote_storage_type< Sparse, Dense, Functor >
struct  cwise_promote_storage_type< Dense, Sparse, Functor >
struct  product_promote_storage_type< A, A, ProductTag >
struct  product_promote_storage_type< Dense, Dense, ProductTag >
struct  product_promote_storage_type< A, Dense, ProductTag >
struct  product_promote_storage_type< Dense, B, ProductTag >
struct  product_promote_storage_type< A, DiagonalShape, ProductTag >
struct  product_promote_storage_type< DiagonalShape, B, ProductTag >
struct  product_promote_storage_type< Dense, DiagonalShape, ProductTag >
struct  product_promote_storage_type< DiagonalShape, Dense, ProductTag >
struct  product_promote_storage_type< A, PermutationStorage, ProductTag >
struct  product_promote_storage_type< PermutationStorage, B, ProductTag >
struct  product_promote_storage_type< Dense, PermutationStorage, ProductTag >
struct  product_promote_storage_type< PermutationStorage, Dense, ProductTag >
struct  plain_row_type
struct  plain_col_type
struct  plain_diag_type
struct  is_lvalue
struct  is_diagonal
struct  is_diagonal< DiagonalBase< T > >
struct  is_diagonal< DiagonalWrapper< T > >
struct  is_diagonal< DiagonalMatrix< T, S > >
struct  glue_shapes< DenseShape, TriangularShape >
struct  is_same_or_void
struct  is_same_or_void< void, T >
struct  is_same_or_void< T, void >
struct  is_same_or_void< void, void >
struct  traits< VectorBlock< VectorType, Size > >
struct  traits< PartialReduxExpr< MatrixType, MemberOp, Direction > >
struct  member_lpnorm
struct  member_redux
struct  visitor_impl
struct  visitor_impl< Visitor, Derived, 1 >
struct  visitor_impl< Visitor, Derived, Dynamic >
class  visitor_evaluator
struct  coeff_visitor
 Base class to implement min and max visitors. More...
struct  min_coeff_visitor
 Visitor computing the min coefficient with its value and coordinates. More...
struct  functor_traits< min_coeff_visitor< Scalar > >
struct  max_coeff_visitor
 Visitor computing the max coefficient with its value and coordinates. More...
struct  functor_traits< max_coeff_visitor< Scalar > >
struct  complex_schur_reduce_to_hessenberg
struct  complex_schur_reduce_to_hessenberg< MatrixType, false >
struct  traits< HessenbergDecompositionMatrixHReturnType< MatrixType > >
struct  HessenbergDecompositionMatrixHReturnType
 Expression type for return value of HessenbergDecomposition::matrixH() More...
struct  eigenvalues_selector
struct  eigenvalues_selector< Derived, false >
struct  direct_selfadjoint_eigenvalues
struct  direct_selfadjoint_eigenvalues< SolverType, 3, false >
struct  direct_selfadjoint_eigenvalues< SolverType, 2, false >
struct  traits< TridiagonalizationMatrixTReturnType< MatrixType > >
struct  tridiagonalization_inplace_selector
struct  tridiagonalization_inplace_selector< MatrixType, 3, false >
struct  tridiagonalization_inplace_selector< MatrixType, 1, IsComplex >
struct  TridiagonalizationMatrixTReturnType
 Expression type for return value of Tridiagonalization::matrixT() More...
struct  traits< AngleAxis< _Scalar > >
struct  quat_product< Architecture::SSE, Derived, OtherDerived, float, Aligned16 >
struct  quat_conj< Architecture::SSE, Derived, float, Alignment >
struct  cross3_impl< Architecture::SSE, VectorLhs, VectorRhs, float, true >
struct  quat_product< Architecture::SSE, Derived, OtherDerived, double, Alignment >
struct  quat_conj< Architecture::SSE, Derived, double, Alignment >
struct  traits< Homogeneous< MatrixType, Direction > >
struct  take_matrix_for_product
struct  take_matrix_for_product< Transform< Scalar, Dim, Mode, Options > >
struct  take_matrix_for_product< Transform< Scalar, Dim, Projective, Options > >
struct  traits< homogeneous_left_product_impl< Homogeneous< MatrixType, Vertical >, Lhs > >
struct  homogeneous_left_product_impl< Homogeneous< MatrixType, Vertical >, Lhs >
struct  traits< homogeneous_right_product_impl< Homogeneous< MatrixType, Horizontal >, Rhs > >
struct  homogeneous_right_product_impl< Homogeneous< MatrixType, Horizontal >, Rhs >
struct  evaluator_traits< Homogeneous< ArgType, Direction > >
struct  AssignmentKind< DenseShape, HomogeneousShape >
struct  unary_evaluator< Homogeneous< ArgType, Direction >, IndexBased >
struct  Assignment< DstXprType, Homogeneous< ArgType, Vertical >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  Assignment< DstXprType, Homogeneous< ArgType, Horizontal >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  generic_product_impl< Homogeneous< LhsArg, Horizontal >, Rhs, HomogeneousShape, DenseShape, ProductTag >
struct  homogeneous_right_product_refactoring_helper
struct  product_evaluator< Product< Lhs, Rhs, LazyProduct >, ProductTag, HomogeneousShape, DenseShape >
struct  generic_product_impl< Lhs, Homogeneous< RhsArg, Vertical >, DenseShape, HomogeneousShape, ProductTag >
struct  homogeneous_left_product_refactoring_helper
struct  product_evaluator< Product< Lhs, Rhs, LazyProduct >, ProductTag, DenseShape, HomogeneousShape >
struct  generic_product_impl< Transform< Scalar, Dim, Mode, Options >, Homogeneous< RhsArg, Vertical >, DenseShape, HomogeneousShape, ProductTag >
struct  permutation_matrix_product< ExpressionType, Side, Transposed, HomogeneousShape >
struct  cross3_impl
struct  unitOrthogonal_selector
struct  unitOrthogonal_selector< Derived, 3 >
struct  unitOrthogonal_selector< Derived, 2 >
struct  traits< Quaternion< _Scalar, _Options > >
struct  traits< Map< Quaternion< _Scalar >, _Options > >
struct  traits< Map< const Quaternion< _Scalar >, _Options > >
struct  quat_product
struct  quat_conj
struct  quaternionbase_assign_impl< Other, 3, 3 >
struct  quaternionbase_assign_impl< Other, 4, 1 >
struct  traits< Rotation2D< _Scalar > >
struct  rotation_base_generic_product_selector< RotationDerived, MatrixType, false >
struct  rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix< Scalar, Dim, MaxDim >, false >
struct  rotation_base_generic_product_selector< RotationDerived, OtherVectorType, true >
struct  transform_traits
struct  traits< Transform< _Scalar, _Dim, _Mode, _Options > >
struct  transform_make_affine
struct  transform_make_affine< AffineCompact >
struct  projective_transform_inverse
struct  projective_transform_inverse< TransformType, Projective >
struct  transform_take_affine_part
struct  transform_take_affine_part< Transform< Scalar, Dim, AffineCompact, Options > >
struct  transform_construct_from_matrix< Other, Mode, Options, Dim, HDim, Dim, Dim >
struct  transform_construct_from_matrix< Other, Mode, Options, Dim, HDim, Dim, HDim >
struct  transform_construct_from_matrix< Other, Mode, Options, Dim, HDim, HDim, HDim >
struct  transform_construct_from_matrix< Other, AffineCompact, Options, Dim, HDim, HDim, HDim >
struct  transform_product_result
struct  transform_right_product_impl< TransformType, MatrixType, 0 >
struct  transform_right_product_impl< TransformType, MatrixType, 1 >
struct  transform_right_product_impl< TransformType, MatrixType, 2 >
struct  transform_left_product_impl< Other, Mode, Options, Dim, HDim, HDim, HDim >
struct  transform_left_product_impl< Other, AffineCompact, Options, Dim, HDim, HDim, HDim >
struct  transform_left_product_impl< Other, Mode, Options, Dim, HDim, Dim, HDim >
struct  transform_left_product_impl< Other, AffineCompact, Options, Dim, HDim, Dim, HDim >
struct  transform_left_product_impl< Other, Mode, Options, Dim, HDim, Dim, Dim >
struct  transform_transform_product_impl< Transform< Scalar, Dim, LhsMode, LhsOptions >, Transform< Scalar, Dim, RhsMode, RhsOptions >, false >
struct  transform_transform_product_impl< Transform< Scalar, Dim, LhsMode, LhsOptions >, Transform< Scalar, Dim, RhsMode, RhsOptions >, true >
struct  transform_transform_product_impl< Transform< Scalar, Dim, AffineCompact, LhsOptions >, Transform< Scalar, Dim, Projective, RhsOptions >, true >
struct  transform_transform_product_impl< Transform< Scalar, Dim, Projective, LhsOptions >, Transform< Scalar, Dim, AffineCompact, RhsOptions >, true >
struct  umeyama_transform_matrix_type
struct  decrement_size
struct  traits< HouseholderSequence< VectorsType, CoeffsType, Side > >
struct  HouseholderSequenceShape
struct  evaluator_traits< HouseholderSequence< VectorsType, CoeffsType, Side > >
struct  hseq_side_dependent_impl
struct  hseq_side_dependent_impl< VectorsType, CoeffsType, OnTheRight >
struct  matrix_type_times_scalar_type
struct  traits< BiCGSTAB< _MatrixType, _Preconditioner > >
struct  traits< ConjugateGradient< _MatrixType, _UpLo, _Preconditioner > >
struct  is_ref_compatible_impl
struct  is_ref_compatible
class  generic_matrix_wrapper< MatrixType, false >
class  generic_matrix_wrapper< MatrixType, true >
struct  traits< LeastSquaresConjugateGradient< _MatrixType, _Preconditioner > >
struct  traits< SolveWithGuess< Decomposition, RhsType, GuessType > >
struct  evaluator< SolveWithGuess< Decomposition, RhsType, GuessType > >
struct  Assignment< DstXprType, SolveWithGuess< DecType, RhsType, GuessType >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  compute_inverse_size4< Architecture::SSE, float, MatrixType, ResultType >
struct  compute_inverse_size4< Architecture::SSE, double, MatrixType, ResultType >
struct  determinant_impl
struct  determinant_impl< Derived, 1 >
struct  determinant_impl< Derived, 2 >
struct  determinant_impl< Derived, 3 >
struct  determinant_impl< Derived, 4 >
struct  traits< FullPivLU< _MatrixType > >
struct  kernel_retval< FullPivLU< _MatrixType > >
struct  image_retval< FullPivLU< _MatrixType > >
struct  Assignment< DstXprType, Inverse< FullPivLU< MatrixType > >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  compute_inverse
struct  compute_inverse_and_det_with_check
struct  compute_inverse< MatrixType, ResultType, 1 >
struct  compute_inverse_and_det_with_check< MatrixType, ResultType, 1 >
struct  compute_inverse< MatrixType, ResultType, 2 >
struct  compute_inverse_and_det_with_check< MatrixType, ResultType, 2 >
struct  compute_inverse< MatrixType, ResultType, 3 >
struct  compute_inverse_and_det_with_check< MatrixType, ResultType, 3 >
struct  compute_inverse_size4
struct  compute_inverse< MatrixType, ResultType, 4 >
struct  compute_inverse_and_det_with_check< MatrixType, ResultType, 4 >
struct  Assignment< DstXprType, Inverse< XprType >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  traits< PartialPivLU< _MatrixType > >
struct  partial_lu_impl
struct  Assignment< DstXprType, Inverse< PartialPivLU< MatrixType > >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  traits< image_retval_base< DecompositionType > >
class  image_retval_base
struct  traits< kernel_retval_base< DecompositionType > >
class  kernel_retval_base
struct  colamd_col
struct  Colamd_Row
struct  pardiso_run_selector
struct  pardiso_run_selector< long long int >
struct  pardiso_traits< PardisoLU< _MatrixType > >
struct  pardiso_traits< PardisoLLT< _MatrixType, Options > >
struct  pardiso_traits< PardisoLDLT< _MatrixType, Options > >
struct  pastix_traits< PastixLU< _MatrixType > >
struct  pastix_traits< PastixLLT< _MatrixType, Options > >
struct  pastix_traits< PastixLDLT< _MatrixType, Options > >
struct  traits< ColPivHouseholderQR< _MatrixType > >
struct  Assignment< DstXprType, Inverse< ColPivHouseholderQR< MatrixType > >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  traits< CompleteOrthogonalDecomposition< _MatrixType > >
struct  Assignment< DstXprType, Inverse< CompleteOrthogonalDecomposition< MatrixType > >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  traits< FullPivHouseholderQR< _MatrixType > >
struct  traits< FullPivHouseholderQRMatrixQReturnType< MatrixType > >
struct  Assignment< DstXprType, Inverse< FullPivHouseholderQR< MatrixType > >, internal::assign_op< Scalar >, Dense2Dense, Scalar >
struct  FullPivHouseholderQRMatrixQReturnType
 Expression type for return value of FullPivHouseholderQR::matrixQ() More...
struct  householder_qr_inplace_blocked
struct  simplicial_cholesky_grab_input
struct  simplicial_cholesky_grab_input< MatrixType, MatrixType >
struct  traits< SimplicialLLT< _MatrixType, _UpLo, _Ordering > >
struct  traits< SimplicialLDLT< _MatrixType, _UpLo, _Ordering > >
struct  traits< SimplicialCholesky< _MatrixType, _UpLo, _Ordering > >
class  AmbiVector
class  CompressedStorage
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor, ColMajor >
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor, ColMajor >
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor, ColMajor >
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor, ColMajor >
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor, RowMajor >
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor, RowMajor >
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor, RowMajor >
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor, RowMajor >
struct  sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor >
struct  sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor >
struct  sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor >
struct  sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor >
struct  traits< MappedSparseMatrix< _Scalar, _Flags, _StorageIndex > >
struct  evaluator< MappedSparseMatrix< _Scalar, _Options, _StorageIndex > >
struct  storage_kind_to_evaluator_kind< Sparse >
struct  storage_kind_to_shape< Sparse >
struct  Sparse2Sparse
struct  Sparse2Dense
struct  AssignmentKind< SparseShape, SparseShape >
struct  AssignmentKind< SparseShape, SparseTriangularShape >
struct  AssignmentKind< DenseShape, SparseShape >
struct  AssignmentKind< DenseShape, SparseTriangularShape >
struct  Assignment< DstXprType, SrcXprType, Functor, Sparse2Sparse, Scalar >
struct  Assignment< DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar >
struct  Assignment< DstXprType, Solve< DecType, RhsType >, internal::assign_op< Scalar >, Sparse2Sparse, Scalar >
struct  Diagonal2Sparse
struct  AssignmentKind< SparseShape, DiagonalShape >
struct  Assignment< DstXprType, SrcXprType, Functor, Diagonal2Sparse, Scalar >
class  sparse_matrix_block_impl
struct  unary_evaluator< Block< ArgType, BlockRows, BlockCols, InnerPanel >, IteratorBased >
struct  unary_evaluator< Block< SparseMatrix< _Scalar, _Options, _StorageIndex >, BlockRows, BlockCols, true >, IteratorBased >
struct  unary_evaluator< Block< const SparseMatrix< _Scalar, _Options, _StorageIndex >, BlockRows, BlockCols, true >, IteratorBased >
struct  traits< SparseCompressedBase< Derived > >
struct  evaluator< SparseCompressedBase< Derived > >
struct  binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs >, IteratorBased, IteratorBased >
struct  binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs >, IndexBased, IteratorBased >
struct  binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs >, IteratorBased, IndexBased >
struct  binary_evaluator< CwiseBinaryOp< scalar_product_op< T >, Lhs, Rhs >, IteratorBased, IteratorBased >
struct  binary_evaluator< CwiseBinaryOp< scalar_product_op< T >, Lhs, Rhs >, IndexBased, IteratorBased >
struct  binary_evaluator< CwiseBinaryOp< scalar_product_op< T >, Lhs, Rhs >, IteratorBased, IndexBased >
struct  unary_evaluator< CwiseUnaryOp< UnaryOp, ArgType >, IteratorBased >
struct  unary_evaluator< CwiseUnaryView< ViewOp, ArgType >, IteratorBased >
struct  product_promote_storage_type< Sparse, Dense, OuterProduct >
struct  product_promote_storage_type< Dense, Sparse, OuterProduct >
struct  sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, typename DenseResType::Scalar, RowMajor, true >
struct  scalar_product_traits< T1, Ref< T2 > >
struct  sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, AlphaType, ColMajor, true >
struct  sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, typename DenseResType::Scalar, RowMajor, false >
struct  sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, typename DenseResType::Scalar, ColMajor, false >
struct  generic_product_impl< Lhs, Rhs, SparseShape, DenseShape, ProductType >
struct  generic_product_impl< Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType >
struct  generic_product_impl< Lhs, Rhs, DenseShape, SparseShape, ProductType >
struct  generic_product_impl< Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType >
struct  sparse_dense_outer_product_evaluator
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, OuterProduct, SparseShape, DenseShape >
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, OuterProduct, DenseShape, SparseShape >
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, ProductTag, DiagonalShape, SparseShape >
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, ProductTag, SparseShape, DiagonalShape >
struct  sparse_diagonal_product_evaluator< SparseXprType, DiagonalCoeffType, SDP_AsScalarProduct >
struct  sparse_diagonal_product_evaluator< SparseXprType, DiagCoeffType, SDP_AsCwiseProduct >
struct  traits< Map< SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
struct  traits< Map< const SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
struct  evaluator< Map< SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
struct  evaluator< Map< const SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
struct  traits< SparseMatrix< _Scalar, _Options, _Index > >
struct  traits< Diagonal< SparseMatrix< _Scalar, _Options, _Index >, DiagIndex > >
struct  traits< Diagonal< const SparseMatrix< _Scalar, _Options, _Index >, DiagIndex > >
struct  evaluator< SparseMatrix< _Scalar, _Options, _Index > >
struct  permutation_matrix_product< ExpressionType, Side, Transposed, SparseShape >
struct  product_promote_storage_type< Sparse, PermutationStorage, ProductTag >
struct  product_promote_storage_type< PermutationStorage, Sparse, ProductTag >
struct  product_evaluator< Product< Lhs, Rhs, AliasFreeProduct >, ProductTag, PermutationShape, SparseShape >
struct  product_evaluator< Product< Lhs, Rhs, AliasFreeProduct >, ProductTag, SparseShape, PermutationShape >
struct  generic_product_impl< Lhs, Rhs, SparseShape, SparseShape, ProductType >
struct  generic_product_impl< Lhs, Rhs, SparseShape, SparseTriangularShape, ProductType >
struct  generic_product_impl< Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType >
struct  Assignment< DstXprType, Product< Lhs, Rhs, AliasFreeProduct >, internal::assign_op< typename DstXprType::Scalar >, Sparse2Dense >
struct  Assignment< DstXprType, Product< Lhs, Rhs, AliasFreeProduct >, internal::add_assign_op< typename DstXprType::Scalar >, Sparse2Dense >
struct  Assignment< DstXprType, Product< Lhs, Rhs, AliasFreeProduct >, internal::sub_assign_op< typename DstXprType::Scalar >, Sparse2Dense >
struct  evaluator< SparseView< Product< Lhs, Rhs, Options > > >
struct  traits< Ref< SparseMatrix< MatScalar, MatOptions, MatIndex >, _Options, _StrideType > >
struct  traits< Ref< const SparseMatrix< MatScalar, MatOptions, MatIndex >, _Options, _StrideType > >
struct  traits< Ref< SparseVector< MatScalar, MatOptions, MatIndex >, _Options, _StrideType > >
struct  traits< Ref< const SparseVector< MatScalar, MatOptions, MatIndex >, _Options, _StrideType > >
struct  traits< SparseRefBase< Derived > >
class  SparseRefBase
struct  evaluator< Ref< SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
struct  evaluator< Ref< const SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
struct  evaluator< Ref< SparseVector< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
struct  evaluator< Ref< const SparseVector< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
struct  traits< SparseSelfAdjointView< MatrixType, Mode > >
struct  evaluator_traits< SparseSelfAdjointView< MatrixType, Mode > >
struct  SparseSelfAdjoint2Sparse
struct  AssignmentKind< SparseShape, SparseSelfAdjointShape >
struct  AssignmentKind< SparseSelfAdjointShape, SparseShape >
struct  Assignment< DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse, Scalar >
struct  generic_product_impl< LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType >
struct  generic_product_impl< Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType >
struct  product_evaluator< Product< LhsView, Rhs, DefaultProduct >, ProductTag, SparseSelfAdjointShape, SparseShape >
struct  product_evaluator< Product< Lhs, RhsView, DefaultProduct >, ProductTag, SparseShape, SparseSelfAdjointShape >
struct  traits< SparseSymmetricPermutationProduct< MatrixType, Mode > >
struct  Assignment< DstXprType, SparseSymmetricPermutationProduct< MatrixType, Mode >, internal::assign_op< Scalar >, Sparse2Sparse >
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor, ColMajor >
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor, RowMajor >
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor, RowMajor >
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor, ColMajor >
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor, RowMajor >
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor, RowMajor >
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor, ColMajor >
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor, ColMajor >
class  SparseTransposeImpl
class  SparseTransposeImpl< MatrixType, CompressedAccessBit >
struct  unary_evaluator< Transpose< ArgType >, IteratorBased >
struct  unary_evaluator< TriangularView< ArgType, Mode >, IteratorBased >
struct  eval< T, Sparse >
struct  sparse_eval< T, 1, Cols, Flags >
struct  sparse_eval< T, Rows, 1, Flags >
struct  sparse_eval
struct  sparse_eval< T, 1, 1, Flags >
struct  plain_matrix_type< T, Sparse >
struct  plain_object_eval< T, Sparse >
struct  solve_traits< Decomposition, RhsType, Sparse >
struct  generic_xpr_base< Derived, MatrixXpr, Sparse >
struct  SparseTriangularShape
struct  SparseSelfAdjointShape
struct  glue_shapes< SparseShape, SelfAdjointShape >
struct  glue_shapes< SparseShape, TriangularShape >
struct  traits< SparseVector< _Scalar, _Options, _StorageIndex > >
struct  evaluator< SparseVector< _Scalar, _Options, _Index > >
struct  sparse_vector_assign_selector< Dest, Src, SVA_Inner >
struct  sparse_vector_assign_selector< Dest, Src, SVA_Outer >
struct  sparse_vector_assign_selector< Dest, Src, SVA_RuntimeSwitch >
struct  traits< SparseView< MatrixType > >
struct  unary_evaluator< SparseView< ArgType >, IteratorBased >
struct  unary_evaluator< SparseView< ArgType >, IndexBased >
struct  sparse_solve_triangular_selector< Lhs, Rhs, Mode, Lower, RowMajor >
struct  sparse_solve_triangular_selector< Lhs, Rhs, Mode, Upper, RowMajor >
struct  sparse_solve_triangular_selector< Lhs, Rhs, Mode, Lower, ColMajor >
struct  sparse_solve_triangular_selector< Lhs, Rhs, Mode, Upper, ColMajor >
struct  sparse_solve_triangular_sparse_selector< Lhs, Rhs, Mode, UpLo, ColMajor >
struct  column_dfs_traits
struct  LU_kernel_bmod
struct  LU_kernel_bmod< 1 >
struct  panel_dfs_traits
struct  LU_GlobalLU_t
struct  perfvalues
class  MappedSuperNodalMatrix
 A class to manipulate the L supernodal factor from the SparseLU factorization. More...
class  SparseLUImpl
struct  traits< SparseQRMatrixQReturnType< SparseQRType > >
struct  traits< SparseQRMatrixQTransposeReturnType< SparseQRType > >
struct  traits< SparseQR_QProduct< SparseQRType, Derived > >
struct  evaluator_traits< SparseQRMatrixQReturnType< SparseQRType > >
struct  Assignment< DstXprType, SparseQRMatrixQReturnType< SparseQRType >, internal::assign_op< typename DstXprType::Scalar >, Sparse2Sparse >
struct  Assignment< DstXprType, SparseQRMatrixQReturnType< SparseQRType >, internal::assign_op< typename DstXprType::Scalar >, Sparse2Dense >
struct  traits< SPQRMatrixQReturnType< SPQRType > >
struct  traits< SPQRMatrixQTransposeReturnType< SPQRType > >
struct  traits< SPQR_QProduct< SPQRType, Derived > >
struct  traits< BDCSVD< _MatrixType > >
struct  svd_precondition_2x2_block_to_be_real
struct  qr_preconditioner_should_do_anything
struct  qr_preconditioner_impl
class  qr_preconditioner_impl< MatrixType, QRPreconditioner, Case, false >
class  qr_preconditioner_impl< MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true >
class  qr_preconditioner_impl< MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true >
class  qr_preconditioner_impl< MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true >
class  qr_preconditioner_impl< MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true >
class  qr_preconditioner_impl< MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true >
class  qr_preconditioner_impl< MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true >
struct  svd_precondition_2x2_block_to_be_real< MatrixType, QRPreconditioner, false >
struct  svd_precondition_2x2_block_to_be_real< MatrixType, QRPreconditioner, true >
struct  traits< JacobiSVD< _MatrixType, QRPreconditioner > >
class  UpperBidiagonalization

Typedefs

typedef __vector float Packet4f
typedef __vector int Packet4i
typedef __vector unsigned int Packet4ui
typedef __vector __bool int Packet4bi
typedef __vector short int Packet8i
typedef __vector unsigned char Packet16uc
typedef __m256 Packet8f
typedef __m256d Packet4d
typedef float32x2_t Packet2f
typedef int32x2_t Packet2i
typedef __m128d Packet2d

Enumerations

enum  SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite }
enum  PermPermProduct_t { PermPermProduct }
enum  ComparisonName {
  cmp_EQ = 0, cmp_LT = 1, cmp_LE = 2, cmp_UNORD = 3,
  cmp_NEQ = 4, cmp_GT = 5, cmp_GE = 6
}
enum  { SDP_AsScalarProduct, SDP_AsCwiseProduct }
enum  { SVA_RuntimeSwitch, SVA_Inner, SVA_Outer }
enum  { LUNoMarker = 3 }
enum  { emptyIdxLU = -1 }
enum  MemType {
  LUSUP, UCOL, LSUB, USUB,
  LLVL, ULVL
}
enum  { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols }

Functions

template<typename MatrixType , typename VectorType >
static Index llt_rank_update_lower (MatrixType &mat, const VectorType &vec, const typename MatrixType::RealScalar &sigma)
template<typename Scalar , typename CholmodType >
void cholmod_configure_matrix (CholmodType &mat)
template<>
EIGEN_STRONG_INLINE Packet2cf pset1< Packet2cf > (const std::complex< float > &from)
template<>
EIGEN_DEVICE_FUNC Packet2cf pgather< std::complex< float >, Packet2cf > (const std::complex< float > *from, Index stride)
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< float >, Packet2cf > (std::complex< float > *to, const Packet2cf &from, Index stride)
template<>
EIGEN_STRONG_INLINE Packet2cf padd< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
template<>
EIGEN_STRONG_INLINE Packet2cf psub< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
template<>
EIGEN_STRONG_INLINE Packet2cf pnegate (const Packet2cf &a)
template<>
EIGEN_STRONG_INLINE Packet2cf pconj (const Packet2cf &a)
template<>
EIGEN_STRONG_INLINE Packet2cf pmul< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
template<>
EIGEN_STRONG_INLINE Packet2cf pand< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
template<>
EIGEN_STRONG_INLINE Packet2cf por< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
template<>
EIGEN_STRONG_INLINE Packet2cf pxor< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
template<>
EIGEN_STRONG_INLINE Packet2cf pandnot< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
template<>
EIGEN_STRONG_INLINE Packet2cf pload< Packet2cf > (const std::complex< float > *from)
template<>
EIGEN_STRONG_INLINE Packet2cf ploadu< Packet2cf > (const std::complex< float > *from)
template<>
EIGEN_STRONG_INLINE Packet2cf ploaddup< Packet2cf > (const std::complex< float > *from)
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
template<>
EIGEN_STRONG_INLINE void prefetch< std::complex< float > > (const std::complex< float > *addr)
template<>
EIGEN_STRONG_INLINE
std::complex< float > 
pfirst< Packet2cf > (const Packet2cf &a)
template<>
EIGEN_STRONG_INLINE Packet2cf preverse (const Packet2cf &a)
template<>
EIGEN_STRONG_INLINE
std::complex< float > 
predux< Packet2cf > (const Packet2cf &a)
template<>
EIGEN_STRONG_INLINE Packet2cf preduxp< Packet2cf > (const Packet2cf *vecs)
template<>
EIGEN_STRONG_INLINE
std::complex< float > 
predux_mul< Packet2cf > (const Packet2cf &a)
template<>
EIGEN_STRONG_INLINE Packet2cf pdiv< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
template<>
EIGEN_STRONG_INLINE Packet2cf pcplxflip< Packet2cf > (const Packet2cf &x)
EIGEN_STRONG_INLINE void ptranspose (PacketBlock< Packet2cf, 2 > &kernel)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4f 
plog< Packet4f > (const Packet4f &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4f 
pexp< Packet4f > (const Packet4f &_x)
static _EIGEN_DECLARE_CONST_FAST_Packet4f (ZERO, 0)
static _EIGEN_DECLARE_CONST_FAST_Packet4i (ZERO, 0)
static _EIGEN_DECLARE_CONST_FAST_Packet4i (ONE, 1)
static _EIGEN_DECLARE_CONST_FAST_Packet4i (MINUS16,-16)
static _EIGEN_DECLARE_CONST_FAST_Packet4i (MINUS1,-1)
std::ostream & operator<< (std::ostream &s, const Packet16uc &v)
std::ostream & operator<< (std::ostream &s, const Packet4f &v)
std::ostream & operator<< (std::ostream &s, const Packet4i &v)
std::ostream & operator<< (std::ostream &s, const Packet4ui &v)
template<>
EIGEN_STRONG_INLINE Packet4f pload< Packet4f > (const float *from)
template<>
EIGEN_STRONG_INLINE Packet4i pload< Packet4i > (const int *from)
template<>
EIGEN_STRONG_INLINE void pstore< float > (float *to, const Packet4f &from)
template<>
EIGEN_STRONG_INLINE void pstore< int > (int *to, const Packet4i &from)
template<>
EIGEN_STRONG_INLINE Packet4f pset1< Packet4f > (const float &from)
template<>
EIGEN_STRONG_INLINE Packet4i pset1< Packet4i > (const int &from)
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet4f > (const float *a, Packet4f &a0, Packet4f &a1, Packet4f &a2, Packet4f &a3)
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet4i > (const int *a, Packet4i &a0, Packet4i &a1, Packet4i &a2, Packet4i &a3)
template<>
EIGEN_DEVICE_FUNC Packet4f pgather< float, Packet4f > (const float *from, Index stride)
template<>
EIGEN_DEVICE_FUNC Packet4i pgather< int, Packet4i > (const int *from, Index stride)
template<>
EIGEN_DEVICE_FUNC void pscatter< float, Packet4f > (float *to, const Packet4f &from, Index stride)
template<>
EIGEN_DEVICE_FUNC void pscatter< int, Packet4i > (int *to, const Packet4i &from, Index stride)
template<>
EIGEN_STRONG_INLINE Packet4f plset< Packet4f > (const float &a)
template<>
EIGEN_STRONG_INLINE Packet4i plset< Packet4i > (const int &a)
template<>
EIGEN_STRONG_INLINE Packet4f padd< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i padd< Packet4i > (const Packet4i &a, const Packet4i &b)
template<>
EIGEN_STRONG_INLINE Packet4f psub< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i psub< Packet4i > (const Packet4i &a, const Packet4i &b)
template<>
EIGEN_STRONG_INLINE Packet4f pnegate (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE Packet4i pnegate (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE Packet4f pconj (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE Packet4i pconj (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE Packet4f pmul< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4f pdiv< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i pdiv< Packet4i > (const Packet4i &, const Packet4i &)
template<>
EIGEN_STRONG_INLINE Packet4f pmadd (const Packet4f &a, const Packet4f &b, const Packet4f &c)
template<>
EIGEN_STRONG_INLINE Packet4i pmadd (const Packet4i &a, const Packet4i &b, const Packet4i &c)
template<>
EIGEN_STRONG_INLINE Packet4f pmin< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i pmin< Packet4i > (const Packet4i &a, const Packet4i &b)
template<>
EIGEN_STRONG_INLINE Packet4f pmax< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i pmax< Packet4i > (const Packet4i &a, const Packet4i &b)
template<>
EIGEN_STRONG_INLINE Packet4f pand< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i pand< Packet4i > (const Packet4i &a, const Packet4i &b)
template<>
EIGEN_STRONG_INLINE Packet4f por< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i por< Packet4i > (const Packet4i &a, const Packet4i &b)
template<>
EIGEN_STRONG_INLINE Packet4f pxor< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i pxor< Packet4i > (const Packet4i &a, const Packet4i &b)
template<>
EIGEN_STRONG_INLINE Packet4f pandnot< Packet4f > (const Packet4f &a, const Packet4f &b)
template<>
EIGEN_STRONG_INLINE Packet4i pandnot< Packet4i > (const Packet4i &a, const Packet4i &b)
template<>
EIGEN_STRONG_INLINE Packet4i ploadu< Packet4i > (const int *from)
template<>
EIGEN_STRONG_INLINE Packet4f ploadu< Packet4f > (const float *from)
template<>
EIGEN_STRONG_INLINE Packet4f ploaddup< Packet4f > (const float *from)
template<>
EIGEN_STRONG_INLINE Packet4i ploaddup< Packet4i > (const int *from)
template<>
EIGEN_STRONG_INLINE void pstoreu< int > (int *to, const Packet4i &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< float > (float *to, const Packet4f &from)
template<>
EIGEN_STRONG_INLINE void prefetch< float > (const float *addr)
template<>
EIGEN_STRONG_INLINE void prefetch< int > (const int *addr)
template<>
EIGEN_STRONG_INLINE float pfirst< Packet4f > (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE int pfirst< Packet4i > (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE Packet4f preverse (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE Packet4i preverse (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE Packet4f pabs (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE Packet4i pabs (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE float predux< Packet4f > (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE Packet4f preduxp< Packet4f > (const Packet4f *vecs)
template<>
EIGEN_STRONG_INLINE int predux< Packet4i > (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE Packet4i preduxp< Packet4i > (const Packet4i *vecs)
template<>
EIGEN_STRONG_INLINE float predux_mul< Packet4f > (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE int predux_mul< Packet4i > (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE float predux_min< Packet4f > (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE int predux_min< Packet4i > (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE float predux_max< Packet4f > (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE int predux_max< Packet4i > (const Packet4i &a)
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet4f, 4 > &kernel)
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet4i, 4 > &kernel)
template<>
EIGEN_STRONG_INLINE Packet4cf padd< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
template<>
EIGEN_STRONG_INLINE Packet4cf psub< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
template<>
EIGEN_STRONG_INLINE Packet4cf pnegate (const Packet4cf &a)
template<>
EIGEN_STRONG_INLINE Packet4cf pconj (const Packet4cf &a)
template<>
EIGEN_STRONG_INLINE Packet4cf pmul< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
template<>
EIGEN_STRONG_INLINE Packet4cf pand< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
template<>
EIGEN_STRONG_INLINE Packet4cf por< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
template<>
EIGEN_STRONG_INLINE Packet4cf pxor< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
template<>
EIGEN_STRONG_INLINE Packet4cf pandnot< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
template<>
EIGEN_STRONG_INLINE Packet4cf pload< Packet4cf > (const std::complex< float > *from)
template<>
EIGEN_STRONG_INLINE Packet4cf ploadu< Packet4cf > (const std::complex< float > *from)
template<>
EIGEN_STRONG_INLINE Packet4cf pset1< Packet4cf > (const std::complex< float > &from)
template<>
EIGEN_STRONG_INLINE Packet4cf ploaddup< Packet4cf > (const std::complex< float > *from)
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< float > > (std::complex< float > *to, const Packet4cf &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< float > > (std::complex< float > *to, const Packet4cf &from)
template<>
EIGEN_DEVICE_FUNC Packet4cf pgather< std::complex< float >, Packet4cf > (const std::complex< float > *from, Index stride)
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< float >, Packet4cf > (std::complex< float > *to, const Packet4cf &from, Index stride)
template<>
EIGEN_STRONG_INLINE
std::complex< float > 
pfirst< Packet4cf > (const Packet4cf &a)
template<>
EIGEN_STRONG_INLINE Packet4cf preverse (const Packet4cf &a)
template<>
EIGEN_STRONG_INLINE
std::complex< float > 
predux< Packet4cf > (const Packet4cf &a)
template<>
EIGEN_STRONG_INLINE Packet4cf preduxp< Packet4cf > (const Packet4cf *vecs)
template<>
EIGEN_STRONG_INLINE
std::complex< float > 
predux_mul< Packet4cf > (const Packet4cf &a)
template<>
EIGEN_STRONG_INLINE Packet4cf pdiv< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
template<>
EIGEN_STRONG_INLINE Packet4cf pcplxflip< Packet4cf > (const Packet4cf &x)
template<>
EIGEN_STRONG_INLINE Packet2cd padd< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
template<>
EIGEN_STRONG_INLINE Packet2cd psub< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
template<>
EIGEN_STRONG_INLINE Packet2cd pnegate (const Packet2cd &a)
template<>
EIGEN_STRONG_INLINE Packet2cd pconj (const Packet2cd &a)
template<>
EIGEN_STRONG_INLINE Packet2cd pmul< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
template<>
EIGEN_STRONG_INLINE Packet2cd pand< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
template<>
EIGEN_STRONG_INLINE Packet2cd por< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
template<>
EIGEN_STRONG_INLINE Packet2cd pxor< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
template<>
EIGEN_STRONG_INLINE Packet2cd pandnot< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
template<>
EIGEN_STRONG_INLINE Packet2cd pload< Packet2cd > (const std::complex< double > *from)
template<>
EIGEN_STRONG_INLINE Packet2cd ploadu< Packet2cd > (const std::complex< double > *from)
template<>
EIGEN_STRONG_INLINE Packet2cd pset1< Packet2cd > (const std::complex< double > &from)
template<>
EIGEN_STRONG_INLINE Packet2cd ploaddup< Packet2cd > (const std::complex< double > *from)
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< double > > (std::complex< double > *to, const Packet2cd &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< double > > (std::complex< double > *to, const Packet2cd &from)
template<>
EIGEN_DEVICE_FUNC Packet2cd pgather< std::complex< double >, Packet2cd > (const std::complex< double > *from, Index stride)
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< double >, Packet2cd > (std::complex< double > *to, const Packet2cd &from, Index stride)
template<>
EIGEN_STRONG_INLINE
std::complex< double > 
pfirst< Packet2cd > (const Packet2cd &a)
template<>
EIGEN_STRONG_INLINE Packet2cd preverse (const Packet2cd &a)
template<>
EIGEN_STRONG_INLINE
std::complex< double > 
predux< Packet2cd > (const Packet2cd &a)
template<>
EIGEN_STRONG_INLINE Packet2cd preduxp< Packet2cd > (const Packet2cd *vecs)
template<>
EIGEN_STRONG_INLINE
std::complex< double > 
predux_mul< Packet2cd > (const Packet2cd &a)
template<>
EIGEN_STRONG_INLINE Packet2cd pdiv< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
template<>
EIGEN_STRONG_INLINE Packet2cd pcplxflip< Packet2cd > (const Packet2cd &x)
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet4cf, 4 > &kernel)
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet2cd, 2 > &kernel)
Packet8i pshiftleft (Packet8i v, int n)
Packet8f pshiftright (Packet8f v, int n)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet8f 
psin< Packet8f > (const Packet8f &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet8f 
plog< Packet8f > (const Packet8f &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet8f 
pexp< Packet8f > (const Packet8f &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet8f 
ptanh< Packet8f > (const Packet8f &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4d 
pexp< Packet4d > (const Packet4d &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet8f 
psqrt< Packet8f > (const Packet8f &x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4d 
psqrt< Packet4d > (const Packet4d &x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet8f 
prsqrt< Packet8f > (const Packet8f &x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4d 
prsqrt< Packet4d > (const Packet4d &x)
template<>
EIGEN_STRONG_INLINE Packet8f pset1< Packet8f > (const float &from)
template<>
EIGEN_STRONG_INLINE Packet4d pset1< Packet4d > (const double &from)
template<>
EIGEN_STRONG_INLINE Packet8i pset1< Packet8i > (const int &from)
template<>
EIGEN_STRONG_INLINE Packet8f pload1< Packet8f > (const float *from)
template<>
EIGEN_STRONG_INLINE Packet4d pload1< Packet4d > (const double *from)
template<>
EIGEN_STRONG_INLINE Packet8f plset< Packet8f > (const float &a)
template<>
EIGEN_STRONG_INLINE Packet4d plset< Packet4d > (const double &a)
template<>
EIGEN_STRONG_INLINE Packet8f padd< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d padd< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f psub< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d psub< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f pnegate (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE Packet4d pnegate (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE Packet8f pconj (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE Packet4d pconj (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE Packet8i pconj (const Packet8i &a)
template<>
EIGEN_STRONG_INLINE Packet8f pmul< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d pmul< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f pdiv< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d pdiv< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8i pdiv< Packet8i > (const Packet8i &, const Packet8i &)
template<>
EIGEN_STRONG_INLINE Packet8f pmin< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d pmin< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f pmax< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d pmax< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f pround< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE Packet4d pround< Packet4d > (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE Packet8f pceil< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE Packet4d pceil< Packet4d > (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE Packet8f pfloor< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE Packet4d pfloor< Packet4d > (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE Packet8f pand< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d pand< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f por< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d por< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f pxor< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d pxor< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f pandnot< Packet8f > (const Packet8f &a, const Packet8f &b)
template<>
EIGEN_STRONG_INLINE Packet4d pandnot< Packet4d > (const Packet4d &a, const Packet4d &b)
template<>
EIGEN_STRONG_INLINE Packet8f pload< Packet8f > (const float *from)
template<>
EIGEN_STRONG_INLINE Packet4d pload< Packet4d > (const double *from)
template<>
EIGEN_STRONG_INLINE Packet8i pload< Packet8i > (const int *from)
template<>
EIGEN_STRONG_INLINE Packet8f ploadu< Packet8f > (const float *from)
template<>
EIGEN_STRONG_INLINE Packet4d ploadu< Packet4d > (const double *from)
template<>
EIGEN_STRONG_INLINE Packet8i ploadu< Packet8i > (const int *from)
template<>
EIGEN_STRONG_INLINE Packet8f ploaddup< Packet8f > (const float *from)
template<>
EIGEN_STRONG_INLINE Packet4d ploaddup< Packet4d > (const double *from)
template<>
EIGEN_STRONG_INLINE Packet8f ploadquad< Packet8f > (const float *from)
template<>
EIGEN_STRONG_INLINE void pstore< float > (float *to, const Packet8f &from)
template<>
EIGEN_STRONG_INLINE void pstore< double > (double *to, const Packet4d &from)
template<>
EIGEN_STRONG_INLINE void pstore< int > (int *to, const Packet8i &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< float > (float *to, const Packet8f &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< double > (double *to, const Packet4d &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< int > (int *to, const Packet8i &from)
template<>
EIGEN_DEVICE_FUNC Packet8f pgather< float, Packet8f > (const float *from, Index stride)
template<>
EIGEN_DEVICE_FUNC Packet4d pgather< double, Packet4d > (const double *from, Index stride)
template<>
EIGEN_DEVICE_FUNC void pscatter< float, Packet8f > (float *to, const Packet8f &from, Index stride)
template<>
EIGEN_DEVICE_FUNC void pscatter< double, Packet4d > (double *to, const Packet4d &from, Index stride)
template<>
EIGEN_STRONG_INLINE void pstore1< Packet8f > (float *to, const float &a)
template<>
EIGEN_STRONG_INLINE void pstore1< Packet4d > (double *to, const double &a)
template<>
EIGEN_STRONG_INLINE void pstore1< Packet8i > (int *to, const int &a)
template<>
EIGEN_STRONG_INLINE void prefetch< double > (const double *addr)
template<>
EIGEN_STRONG_INLINE float pfirst< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE double pfirst< Packet4d > (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE int pfirst< Packet8i > (const Packet8i &a)
template<>
EIGEN_STRONG_INLINE Packet8f preverse (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE Packet4d preverse (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE Packet8f pabs (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE Packet4d pabs (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE Packet8f preduxp< Packet8f > (const Packet8f *vecs)
template<>
EIGEN_STRONG_INLINE Packet4d preduxp< Packet4d > (const Packet4d *vecs)
template<>
EIGEN_STRONG_INLINE float predux< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE double predux< Packet4d > (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE Packet4f predux4< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE float predux_mul< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE double predux_mul< Packet4d > (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE float predux_min< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE double predux_min< Packet4d > (const Packet4d &a)
template<>
EIGEN_STRONG_INLINE float predux_max< Packet8f > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE double predux_max< Packet4d > (const Packet4d &a)
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet8f, 8 > &kernel)
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet8f, 4 > &kernel)
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet4d, 4 > &kernel)
template<>
EIGEN_STRONG_INLINE Packet8f pblend (const Selector< 8 > &ifPacket, const Packet8f &thenPacket, const Packet8f &elsePacket)
template<>
EIGEN_STRONG_INLINE Packet4d pblend (const Selector< 4 > &ifPacket, const Packet4d &thenPacket, const Packet4d &elsePacket)
template<>
EIGEN_STRONG_INLINE Packet8i pcast< Packet8f, Packet8i > (const Packet8f &a)
template<>
EIGEN_STRONG_INLINE Packet8f pcast< Packet8i, Packet8f > (const Packet8i &a)
template<>
EIGEN_STRONG_INLINE Packet4i pmul< Packet4i > (const Packet4i &a, const Packet4i &b)
EIGEN_STRONG_INLINE Packet2cf pcplxflip (const Packet2cf &x)
template<>
EIGEN_STRONG_INLINE Packet1cd padd< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
template<>
EIGEN_STRONG_INLINE Packet1cd psub< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
template<>
EIGEN_STRONG_INLINE Packet1cd pnegate (const Packet1cd &a)
template<>
EIGEN_STRONG_INLINE Packet1cd pconj (const Packet1cd &a)
template<>
EIGEN_STRONG_INLINE Packet1cd pmul< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
template<>
EIGEN_STRONG_INLINE Packet1cd pand< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
template<>
EIGEN_STRONG_INLINE Packet1cd por< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
template<>
EIGEN_STRONG_INLINE Packet1cd pxor< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
template<>
EIGEN_STRONG_INLINE Packet1cd pandnot< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
template<>
EIGEN_STRONG_INLINE Packet1cd pload< Packet1cd > (const std::complex< double > *from)
template<>
EIGEN_STRONG_INLINE Packet1cd ploadu< Packet1cd > (const std::complex< double > *from)
template<>
EIGEN_STRONG_INLINE Packet1cd pset1< Packet1cd > (const std::complex< double > &from)
template<>
EIGEN_STRONG_INLINE Packet1cd ploaddup< Packet1cd > (const std::complex< double > *from)
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< double > > (std::complex< double > *to, const Packet1cd &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< double > > (std::complex< double > *to, const Packet1cd &from)
template<>
EIGEN_STRONG_INLINE void prefetch< std::complex< double > > (const std::complex< double > *addr)
template<>
EIGEN_STRONG_INLINE
std::complex< double > 
pfirst< Packet1cd > (const Packet1cd &a)
template<>
EIGEN_STRONG_INLINE Packet1cd preverse (const Packet1cd &a)
template<>
EIGEN_STRONG_INLINE
std::complex< double > 
predux< Packet1cd > (const Packet1cd &a)
template<>
EIGEN_STRONG_INLINE Packet1cd preduxp< Packet1cd > (const Packet1cd *vecs)
template<>
EIGEN_STRONG_INLINE
std::complex< double > 
predux_mul< Packet1cd > (const Packet1cd &a)
template<>
EIGEN_STRONG_INLINE Packet1cd pdiv< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
EIGEN_STRONG_INLINE Packet1cd pcplxflip (const Packet1cd &x)
template<>
EIGEN_STRONG_INLINE Packet2cf pblend (const Selector< 2 > &ifPacket, const Packet2cf &thenPacket, const Packet2cf &elsePacket)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet2d 
pexp< Packet2d > (const Packet2d &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4f 
psin< Packet4f > (const Packet4f &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4f 
pcos< Packet4f > (const Packet4f &_x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4f 
psqrt< Packet4f > (const Packet4f &x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet2d 
psqrt< Packet2d > (const Packet2d &x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4f 
prsqrt< Packet4f > (const Packet4f &x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet2d 
prsqrt< Packet2d > (const Packet2d &x)
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
EIGEN_UNUSED Packet4f 
ptanh< Packet4f > (const Packet4f &_x)
template<>
EIGEN_STRONG_INLINE Packet2d pset1< Packet2d > (const double &from)
template<>
EIGEN_STRONG_INLINE Packet2d plset< Packet2d > (const double &a)
template<>
EIGEN_STRONG_INLINE Packet2d padd< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d psub< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pnegate (const Packet2d &a)
template<>
EIGEN_STRONG_INLINE Packet2d pconj (const Packet2d &a)
template<>
EIGEN_STRONG_INLINE Packet2d pmul< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pdiv< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pmin< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pmax< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pand< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d por< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pxor< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pandnot< Packet2d > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pload< Packet2d > (const double *from)
template<>
EIGEN_STRONG_INLINE Packet2d ploadu< Packet2d > (const double *from)
template<>
EIGEN_STRONG_INLINE Packet2d ploaddup< Packet2d > (const double *from)
template<>
EIGEN_STRONG_INLINE void pstore< double > (double *to, const Packet2d &from)
template<>
EIGEN_STRONG_INLINE void pstoreu< double > (double *to, const Packet2d &from)
template<>
EIGEN_DEVICE_FUNC Packet2d pgather< double, Packet2d > (const double *from, Index stride)
template<>
EIGEN_DEVICE_FUNC void pscatter< double, Packet2d > (double *to, const Packet2d &from, Index stride)
template<>
EIGEN_STRONG_INLINE void pstore1< Packet4f > (float *to, const float &a)
template<>
EIGEN_STRONG_INLINE void pstore1< Packet2d > (double *to, const double &a)
template<>
EIGEN_STRONG_INLINE double pfirst< Packet2d > (const Packet2d &a)
template<>
EIGEN_STRONG_INLINE Packet2d preverse (const Packet2d &a)
template<>
EIGEN_STRONG_INLINE Packet2d pabs (const Packet2d &a)
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet2d > (const double *a, Packet2d &a0, Packet2d &a1, Packet2d &a2, Packet2d &a3)
EIGEN_STRONG_INLINE void punpackp (Packet4f *vecs)
template<>
EIGEN_STRONG_INLINE double predux< Packet2d > (const Packet2d &a)
template<>
EIGEN_STRONG_INLINE Packet2d preduxp< Packet2d > (const Packet2d *vecs)
template<>
EIGEN_STRONG_INLINE double predux_mul< Packet2d > (const Packet2d &a)
template<>
EIGEN_STRONG_INLINE double predux_min< Packet2d > (const Packet2d &a)
template<>
EIGEN_STRONG_INLINE double predux_max< Packet2d > (const Packet2d &a)
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet2d, 2 > &kernel)
template<>
EIGEN_STRONG_INLINE Packet4i pblend (const Selector< 4 > &ifPacket, const Packet4i &thenPacket, const Packet4i &elsePacket)
template<>
EIGEN_STRONG_INLINE Packet4f pblend (const Selector< 4 > &ifPacket, const Packet4f &thenPacket, const Packet4f &elsePacket)
template<>
EIGEN_STRONG_INLINE Packet2d pblend (const Selector< 2 > &ifPacket, const Packet2d &thenPacket, const Packet2d &elsePacket)
template<>
EIGEN_STRONG_INLINE Packet4i pcast< Packet4f, Packet4i > (const Packet4f &a)
template<>
EIGEN_STRONG_INLINE Packet4f pcast< Packet4i, Packet4f > (const Packet4i &a)
template<>
EIGEN_STRONG_INLINE Packet4f pcast< Packet2d, Packet4f > (const Packet2d &a, const Packet2d &b)
template<>
EIGEN_STRONG_INLINE Packet2d pcast< Packet4f, Packet2d > (const Packet4f &a)
template<typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_dense_assignment_loop (const DstXprType &dst, const SrcXprType &src, const Functor &func)
template<typename DstXprType , typename SrcXprType >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_dense_assignment_loop (const DstXprType &dst, const SrcXprType &src)
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment (Dst &dst, const Src &src)
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment (const Dst &dst, const Src &src)
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment (Dst &dst, const Src &src, const Func &func, typename enable_if< evaluator_assume_aliasing< Src >::value, void * >::type=0)
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment (Dst &dst, const Src &src, const Func &func, typename enable_if<!evaluator_assume_aliasing< Src >::value, void * >::type=0)
template<typename Dst , template< typename > class StorageBase, typename Src , typename Func >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment (NoAlias< Dst, StorageBase > &dst, const Src &src, const Func &func)
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment_no_alias (Dst &dst, const Src &src, const Func &func)
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment_no_alias (Dst &dst, const Src &src)
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment_no_alias_no_transpose (Dst &dst, const Src &src, const Func &func)
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_assignment_no_alias_no_transpose (Dst &dst, const Src &src)
template<typename Dst , typename Src >
void check_for_aliasing (const Dst &dst, const Src &src)
static void check_DenseIndex_is_signed ()
template<int Alignment, typename Derived >
static Index first_aligned (const DenseBase< Derived > &m)
template<typename Derived >
static Index first_default_aligned (const DenseBase< Derived > &m)
template<typename T , int Size>
EIGEN_DEVICE_FUNC void check_static_allocation_size ()
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket pcast (const SrcPacket &a)
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket pcast (const SrcPacket &a, const SrcPacket &)
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket pcast (const SrcPacket &a, const SrcPacket &, const SrcPacket &, const SrcPacket &)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet padd (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet psub (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pnegate (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pconj (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pmul (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pdiv (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pmin (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pmax (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pabs (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet parg (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pand (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet por (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pxor (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pandnot (const Packet &a, const Packet &b)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pload (const typename unpacket_traits< Packet >::type *from)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet ploadu (const typename unpacket_traits< Packet >::type *from)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pset1 (const typename unpacket_traits< Packet >::type &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pload1 (const typename unpacket_traits< Packet >::type *a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet ploaddup (const typename unpacket_traits< Packet >::type *from)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet ploadquad (const typename unpacket_traits< Packet >::type *from)
template<typename Packet >
EIGEN_DEVICE_FUNC void pbroadcast4 (const typename unpacket_traits< Packet >::type *a, Packet &a0, Packet &a1, Packet &a2, Packet &a3)
template<typename Packet >
EIGEN_DEVICE_FUNC void pbroadcast2 (const typename unpacket_traits< Packet >::type *a, Packet &a0, Packet &a1)
template<typename Packet >
Packet plset (const typename unpacket_traits< Packet >::type &a)
 Returns a packet with coefficients (a,a+1,...,a+packet_size-1).
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void pstore (Scalar *to, const Packet &from)
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void pstoreu (Scalar *to, const Packet &from)
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC Packet pgather (const Scalar *from, Index)
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void pscatter (Scalar *to, const Packet &from, Index)
template<typename Scalar >
EIGEN_DEVICE_FUNC void prefetch (const Scalar *addr)
template<typename Packet >
EIGEN_DEVICE_FUNC
unpacket_traits< Packet >
::type 
pfirst (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet preduxp (const Packet *vecs)
template<typename Packet >
EIGEN_DEVICE_FUNC
unpacket_traits< Packet >
::type 
predux (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC conditional
<(unpacket_traits< Packet >
::size%8)==0, typename
unpacket_traits< Packet >
::half, Packet >::type 
predux4 (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC
unpacket_traits< Packet >
::type 
predux_mul (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC
unpacket_traits< Packet >
::type 
predux_min (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC
unpacket_traits< Packet >
::type 
predux_max (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet preverse (const Packet &a)
template<size_t offset, typename Packet >
EIGEN_DEVICE_FUNC Packet protate (const Packet &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pcplxflip (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
psin (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pcos (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
ptan (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pasin (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pacos (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
patan (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
psinh (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pcosh (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
ptanh (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pexp (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
plog (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
plog10 (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
psqrt (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
prsqrt (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pround (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pfloor (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pceil (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
plgamma (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
pdigamma (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
perf (const Packet &a)
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet 
perfc (const Packet &a)
template<typename Packet >
void pstore1 (typename unpacket_traits< Packet >::type *to, const typename unpacket_traits< Packet >::type &a)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pmadd (const Packet &a, const Packet &b, const Packet &c)
template<typename Packet , int Alignment>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE Packet 
ploadt (const typename unpacket_traits< Packet >::type *from)
template<typename Scalar , typename Packet , int Alignment>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE void 
pstoret (Scalar *to, const Packet &from)
template<typename Packet , int LoadMode>
Packet ploadt_ro (const typename unpacket_traits< Packet >::type *from)
template<int Offset, typename PacketType >
void palign (PacketType &first, const PacketType &second)
template<>
std::complex< float > pmul (const std::complex< float > &a, const std::complex< float > &b)
template<>
std::complex< double > pmul (const std::complex< double > &a, const std::complex< double > &b)
template<typename Packet >
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet, 1 > &)
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pblend (const Selector< unpacket_traits< Packet >::size > &ifPacket, const Packet &thenPacket, const Packet &elsePacket)
template<typename Derived >
std::ostream & print_matrix (std::ostream &s, const Derived &_m, const IOFormat &fmt)
template<typename Dst , typename Lhs , typename Rhs , typename Func >
EIGEN_DONT_INLINE void outer_product_selector_run (Dst &dst, const Lhs &lhs, const Rhs &rhs, const Func &func, const false_type &)
template<typename Dst , typename Lhs , typename Rhs , typename Func >
EIGEN_DONT_INLINE void outer_product_selector_run (Dst &dst, const Lhs &lhs, const Rhs &rhs, const Func &func, const true_type &)
std::ptrdiff_t manage_caching_sizes_helper (std::ptrdiff_t a, std::ptrdiff_t b)
void manage_caching_sizes (Action action, std::ptrdiff_t *l1, std::ptrdiff_t *l2, std::ptrdiff_t *l3)
template<typename LhsScalar , typename RhsScalar , int KcFactor>
void evaluateProductBlockingSizesHeuristic (Index &k, Index &m, Index &n, Index num_threads=1)
bool useSpecificBlockingSizes (Index &k, Index &m, Index &n)
template<typename LhsScalar , typename RhsScalar , int KcFactor>
void computeProductBlockingSizes (Index &k, Index &m, Index &n, Index num_threads=1)
 Computes the blocking parameters for a m x k times k x n matrix product.
template<typename LhsScalar , typename RhsScalar >
void computeProductBlockingSizes (Index &k, Index &m, Index &n, Index num_threads=1)
template<typename CJ , typename A , typename B , typename C , typename T >
EIGEN_STRONG_INLINE void gebp_madd (const CJ &cj, A &a, B &b, C &c, T &t)
template<typename Packet >
DoublePacket< Packet > padd (const DoublePacket< Packet > &a, const DoublePacket< Packet > &b)
template<typename Packet >
const DoublePacket< Packet > & predux4 (const DoublePacket< Packet > &a)
void manage_multi_threading (Action action, int *v)
template<bool Condition, typename Functor , typename Index >
void parallelize_gemm (const Functor &func, Index rows, Index cols, bool transpose)
template<typename ExpressionType , typename Scalar >
void stable_norm_kernel (const ExpressionType &bl, Scalar &ssq, Scalar &scale, Scalar &invScale)
template<typename Derived >
NumTraits< typename traits
< Derived >::Scalar >::Real 
blueNorm_impl (const EigenBase< Derived > &_vec)
template<int Mode, bool SetOpposite, typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_triangular_assignment_loop (const DstXprType &dst, const SrcXprType &src, const Functor &func)
template<int Mode, bool SetOpposite, typename DstXprType , typename SrcXprType >
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE void 
call_triangular_assignment_loop (const DstXprType &dst, const SrcXprType &src)
template<typename T >
const T::Scalar * extract_data (const T &m)
bool copy_bool (bool b)
template<typename T >
EIGEN_DEVICE_FUNC void ignore_unused_variable (const T &)
EIGEN_DEVICE_FUNC void throw_std_bad_alloc ()
void * handmade_aligned_malloc (std::size_t size)
void handmade_aligned_free (void *ptr)
void * handmade_aligned_realloc (void *ptr, std::size_t size, std::size_t=0)
 Reallocates aligned memory. Since we know that our handmade version is based on std::malloc we can use std::realloc to implement efficient reallocation.
EIGEN_DEVICE_FUNC void check_that_malloc_is_allowed ()
EIGEN_DEVICE_FUNC void * aligned_malloc (size_t size)
EIGEN_DEVICE_FUNC void aligned_free (void *ptr)
void * aligned_realloc (void *ptr, size_t new_size, size_t old_size)
 Reallocates an aligned block of memory.
template<bool Align>
EIGEN_DEVICE_FUNC void * conditional_aligned_malloc (size_t size)
template<>
EIGEN_DEVICE_FUNC void * conditional_aligned_malloc< false > (size_t size)
template<bool Align>
EIGEN_DEVICE_FUNC void conditional_aligned_free (void *ptr)
template<>
EIGEN_DEVICE_FUNC void conditional_aligned_free< false > (void *ptr)
template<bool Align>
void * conditional_aligned_realloc (void *ptr, size_t new_size, size_t old_size)
template<>
void * conditional_aligned_realloc< false > (void *ptr, size_t new_size, size_t)
template<typename T >
EIGEN_DEVICE_FUNC void destruct_elements_of_array (T *ptr, size_t size)
template<typename T >
EIGEN_DEVICE_FUNC T * construct_elements_of_array (T *ptr, size_t size)
template<typename T >
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE void 
check_size_for_overflow (size_t size)
template<typename T >
EIGEN_DEVICE_FUNC T * aligned_new (size_t size)
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * conditional_aligned_new (size_t size)
template<typename T >
EIGEN_DEVICE_FUNC void aligned_delete (T *ptr, size_t size)
template<typename T , bool Align>
EIGEN_DEVICE_FUNC void conditional_aligned_delete (T *ptr, size_t size)
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * conditional_aligned_realloc_new (T *pts, size_t new_size, size_t old_size)
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * conditional_aligned_new_auto (size_t size)
template<typename T , bool Align>
T * conditional_aligned_realloc_new_auto (T *pts, size_t new_size, size_t old_size)
template<typename T , bool Align>
EIGEN_DEVICE_FUNC void conditional_aligned_delete_auto (T *ptr, size_t size)
template<int Alignment, typename Scalar , typename Index >
EIGEN_DEVICE_FUNC Index first_aligned (const Scalar *array, Index size)
template<typename Scalar , typename Index >
EIGEN_DEVICE_FUNC Index first_default_aligned (const Scalar *array, Index size)
template<typename Index >
Index first_multiple (Index size, Index base)
template<typename T >
EIGEN_DEVICE_FUNC void smart_copy (const T *start, const T *end, T *target)
template<typename T >
void smart_memmove (const T *start, const T *end, T *target)
template<typename T >
void swap (scoped_array< T > &a, scoped_array< T > &b)
void queryCacheSizes (int &l1, int &l2, int &l3)
int queryL1CacheSize ()
int queryTopLevelCacheSize ()
template<typename IndexDest , typename IndexSrc >
EIGEN_DEVICE_FUNC IndexDest convert_index (const IndexSrc &idx)
template<typename T >
EIGEN_DEVICE_FUNC T * const_cast_ptr (const T *ptr)
template<typename T1 , typename T2 >
bool is_same_dense (const T1 &mat1, const T2 &mat2, typename enable_if< has_direct_access< T1 >::ret &&has_direct_access< T2 >::ret, T1 >::type *=0)
template<typename T1 , typename T2 >
bool is_same_dense (const T1 &, const T2 &, typename enable_if<!(has_direct_access< T1 >::ret &&has_direct_access< T2 >::ret), T1 >::type *=0)
 EIGEN_MEMBER_FUNCTOR (squaredNorm, Size *NumTraits< Scalar >::MulCost+(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (norm,(Size+5)*NumTraits< Scalar >::MulCost+(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (stableNorm,(Size+5)*NumTraits< Scalar >::MulCost+(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (blueNorm,(Size+5)*NumTraits< Scalar >::MulCost+(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (hypotNorm,(Size-1)*functor_traits< scalar_hypot_op< Scalar > >::Cost)
 EIGEN_MEMBER_FUNCTOR (sum,(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (mean,(Size-1)*NumTraits< Scalar >::AddCost+NumTraits< Scalar >::MulCost)
 EIGEN_MEMBER_FUNCTOR (minCoeff,(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (maxCoeff,(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (all,(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (any,(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (count,(Size-1)*NumTraits< Scalar >::AddCost)
 EIGEN_MEMBER_FUNCTOR (prod,(Size-1)*NumTraits< Scalar >::MulCost)
template<typename MatrixType , typename DiagType , typename SubDiagType >
ComputationInfo computeFromTridiagonal_impl (DiagType &diag, SubDiagType &subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType &eivec)
 Compute the eigendecomposition from a tridiagonal matrix.
template<int StorageOrder, typename RealScalar , typename Scalar , typename Index >
static EIGEN_DEVICE_FUNC void tridiagonal_qr_step (RealScalar *diag, RealScalar *subdiag, Index start, Index end, Scalar *matrixQ, Index n)
template<typename MatrixType , typename CoeffVectorType >
void tridiagonalization_inplace (MatrixType &matA, CoeffVectorType &hCoeffs)
template<typename MatrixType , typename DiagonalType , typename SubDiagonalType >
void tridiagonalization_inplace (MatrixType &mat, DiagonalType &diag, SubDiagonalType &subdiag, bool extractQ)
 Performs a full tridiagonalization in place.
template<typename Scalar , int Dim>
static Matrix< Scalar, 2, 2 > toRotationMatrix (const Scalar &s)
template<typename Scalar , int Dim, typename OtherDerived >
static Matrix< Scalar, Dim, Dim > toRotationMatrix (const RotationBase< OtherDerived, Dim > &r)
template<typename Scalar , int Dim, typename OtherDerived >
static const MatrixBase
< OtherDerived > & 
toRotationMatrix (const MatrixBase< OtherDerived > &mat)
template<typename TriangularFactorType , typename VectorsType , typename CoeffsType >
void make_block_householder_triangular_factor (TriangularFactorType &triFactor, const VectorsType &vectors, const CoeffsType &hCoeffs)
template<typename MatrixType , typename VectorsType , typename CoeffsType >
void apply_block_householder_on_the_left (MatrixType &mat, const VectorsType &vectors, const CoeffsType &hCoeffs, bool forward)
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
bool bicgstab (const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters, typename Dest::RealScalar &tol_error)
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
EIGEN_DONT_INLINE void conjugate_gradient (const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters, typename Dest::RealScalar &tol_error)
template<typename VectorV , typename VectorI >
Index QuickSplit (VectorV &row, VectorI &ind, Index ncut)
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
EIGEN_DONT_INLINE void least_square_conjugate_gradient (const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters, typename Dest::RealScalar &tol_error)
template<typename VectorX , typename VectorY , typename OtherScalar >
void apply_rotation_in_the_plane (DenseBase< VectorX > &xpr_x, DenseBase< VectorY > &xpr_y, const JacobiRotation< OtherScalar > &j)
template<typename Derived >
const Derived::Scalar bruteforce_det3_helper (const MatrixBase< Derived > &matrix, int a, int b, int c)
template<typename Derived >
const Derived::Scalar bruteforce_det4_helper (const MatrixBase< Derived > &matrix, int j, int k, int m, int n)
template<typename MatrixType , typename ResultType >
EIGEN_DEVICE_FUNC void compute_inverse_size2_helper (const MatrixType &matrix, const typename ResultType::Scalar &invdet, ResultType &result)
template<typename MatrixType , int i, int j>
EIGEN_DEVICE_FUNC
MatrixType::Scalar 
cofactor_3x3 (const MatrixType &m)
template<typename MatrixType , typename ResultType >
EIGEN_DEVICE_FUNC void compute_inverse_size3_helper (const MatrixType &matrix, const typename ResultType::Scalar &invdet, const Matrix< typename ResultType::Scalar, 3, 1 > &cofactors_col0, ResultType &result)
template<typename Derived >
EIGEN_DEVICE_FUNC const
Derived::Scalar 
general_det3_helper (const MatrixBase< Derived > &matrix, int i1, int i2, int i3, int j1, int j2, int j3)
template<typename MatrixType , int i, int j>
EIGEN_DEVICE_FUNC
MatrixType::Scalar 
cofactor_4x4 (const MatrixType &matrix)
template<typename MatrixType , typename TranspositionType >
void partial_lu_inplace (MatrixType &lu, TranspositionType &row_transpositions, typename TranspositionType::StorageIndex &nb_transpositions)
template<typename T >
T amd_flip (const T &i)
template<typename T >
T amd_unflip (const T &i)
template<typename T0 , typename T1 >
bool amd_marked (const T0 *w, const T1 &j)
template<typename T0 , typename T1 >
void amd_mark (const T0 *w, const T1 &j)
template<typename StorageIndex >
static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)
template<typename StorageIndex >
StorageIndex cs_tdfs (StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack)
template<typename Scalar , typename StorageIndex >
void minimum_degree_ordering (SparseMatrix< Scalar, ColMajor, StorageIndex > &C, PermutationMatrix< Dynamic, Dynamic, StorageIndex > &perm)
template<typename IndexType >
IndexType colamd_c (IndexType n_col)
template<typename IndexType >
IndexType colamd_r (IndexType n_row)
template<typename IndexType >
static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row< IndexType > Row[], colamd_col< IndexType > col[], IndexType A[], IndexType p[], IndexType stats[COLAMD_STATS])
template<typename IndexType >
static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row< IndexType > Row[], colamd_col< IndexType > Col[], IndexType A[], IndexType head[], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg)
template<typename IndexType >
static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row< IndexType > Row[], colamd_col< IndexType > Col[], IndexType A[], IndexType head[], IndexType n_col2, IndexType max_deg, IndexType pfree)
template<typename IndexType >
static void order_children (IndexType n_col, colamd_col< IndexType > Col[], IndexType p[])
template<typename IndexType >
static void detect_super_cols (colamd_col< IndexType > Col[], IndexType A[], IndexType head[], IndexType row_start, IndexType row_length)
template<typename IndexType >
static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row< IndexType > Row[], colamd_col< IndexType > Col[], IndexType A[], IndexType *pfree)
template<typename IndexType >
static IndexType clear_mark (IndexType n_row, Colamd_Row< IndexType > Row[])
template<typename IndexType >
IndexType colamd_recommended (IndexType nnz, IndexType n_row, IndexType n_col)
 Returns the recommended value of Alen.
static void colamd_set_defaults (double knobs[COLAMD_KNOBS])
 set default parameters The use of this routine is optional.
template<typename IndexType >
static bool colamd (IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS])
 Computes a column ordering using the column approximate minimum degree ordering.
template<typename MatrixType >
void ordering_helper_at_plus_a (const MatrixType &A, MatrixType &symmat)
void eigen_pastix (pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int *invp, float *x, int nbrhs, int *iparm, double *dparm)
void eigen_pastix (pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int *invp, double *x, int nbrhs, int *iparm, double *dparm)
void eigen_pastix (pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex< float > *vals, int *perm, int *invp, std::complex< float > *x, int nbrhs, int *iparm, double *dparm)
void eigen_pastix (pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex< double > *vals, int *perm, int *invp, std::complex< double > *x, int nbrhs, int *iparm, double *dparm)
template<typename MatrixType >
void c_to_fortran_numbering (MatrixType &mat)
template<typename MatrixType >
void fortran_to_c_numbering (MatrixType &mat)
template<typename MatrixQR , typename HCoeffs >
void householder_qr_inplace_unblocked (MatrixQR &mat, HCoeffs &hCoeffs, typename MatrixQR::Scalar *tempData=0)
template<typename Lhs , typename Rhs , typename ResultType >
static void conservative_sparse_sparse_product_impl (const Lhs &lhs, const Rhs &rhs, ResultType &res, bool sortedInsertion=false)
template<typename Lhs , typename Rhs , typename ResultType >
static void sparse_sparse_to_dense_product_impl (const Lhs &lhs, const Rhs &rhs, ResultType &res)
template<typename DstXprType , typename SrcXprType >
void assign_sparse_to_sparse (DstXprType &dst, const SrcXprType &src)
template<typename Index , typename IndexVector >
Index etree_find (Index i, IndexVector &pp)
template<typename MatrixType , typename IndexVector >
int coletree (const MatrixType &mat, IndexVector &parent, IndexVector &firstRowElt, typename MatrixType::StorageIndex *perm=0)
template<typename IndexVector >
void nr_etdfs (typename IndexVector::Scalar n, IndexVector &parent, IndexVector &first_kid, IndexVector &next_kid, IndexVector &post, typename IndexVector::Scalar postnum)
template<typename IndexVector >
void treePostorder (typename IndexVector::Scalar n, IndexVector &parent, IndexVector &post)
 Post order a tree.
template<typename SparseLhsType , typename DenseRhsType , typename DenseResType , typename AlphaType >
void sparse_time_dense_product (const SparseLhsType &lhs, const DenseRhsType &rhs, DenseResType &res, const AlphaType &alpha)
template<typename InputIterator , typename SparseMatrixType , typename DupFunctor >
void set_from_triplets (const InputIterator &begin, const InputIterator &end, SparseMatrixType &mat, DupFunctor dup_func)
template<int SrcMode, int DstMode, typename MatrixType , int DestOrder>
void permute_symm_to_symm (const MatrixType &mat, SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &_dest, const typename MatrixType::StorageIndex *perm=0)
template<int Mode, typename MatrixType , int DestOrder>
void permute_symm_to_fullsymm (const MatrixType &mat, SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &_dest, const typename MatrixType::StorageIndex *perm=0)
template<int Mode, typename SparseLhsType , typename DenseRhsType , typename DenseResType , typename AlphaType >
void sparse_selfadjoint_time_dense_product (const SparseLhsType &lhs, const DenseRhsType &rhs, DenseResType &res, const AlphaType &alpha)
template<int _SrcMode, int _DstMode, typename MatrixType , int DstOrder>
void permute_symm_to_symm (const MatrixType &mat, SparseMatrix< typename MatrixType::Scalar, DstOrder, typename MatrixType::StorageIndex > &_dest, const typename MatrixType::StorageIndex *perm)
template<typename Decomposition , typename Rhs , typename Dest >
void solve_sparse_through_dense_panels (const Decomposition &dec, const Rhs &rhs, Dest &dest)
template<typename Lhs , typename Rhs , typename ResultType >
static void sparse_sparse_product_with_pruning_impl (const Lhs &lhs, const Rhs &rhs, ResultType &res, const typename ResultType::RealScalar &tolerance)
template<typename Scalar >
EIGEN_DONT_INLINE void sparselu_gemm (Index m, Index n, Index d, const Scalar *A, Index lda, const Scalar *B, Index ldb, Scalar *C, Index ldc)
Index LUnumTempV (Index &m, Index &w, Index &t, Index &b)
template<typename Scalar >
Index LUTempSpace (Index &m, Index &w)
template<typename MatrixType >
SluMatrix asSluMatrix (MatrixType &mat)
template<typename Scalar , int Flags, typename Index >
MappedSparseMatrix< Scalar, Flags, Index >
map_superlu (SluMatrix &sluMat)
template<typename MatrixType , typename RealScalar , typename Index >
void real_2x2_jacobi_svd (const MatrixType &matrix, Index p, Index q, JacobiRotation< RealScalar > *j_left, JacobiRotation< RealScalar > *j_right)
template<typename MatrixType >
void upperbidiagonalization_inplace_unblocked (MatrixType &mat, typename MatrixType::RealScalar *diagonal, typename MatrixType::RealScalar *upper_diagonal, typename MatrixType::Scalar *tempData=0)
template<typename MatrixType >
void upperbidiagonalization_blocked_helper (MatrixType &A, typename MatrixType::RealScalar *diagonal, typename MatrixType::RealScalar *upper_diagonal, Index bs, Ref< Matrix< typename MatrixType::Scalar, Dynamic, Dynamic, traits< MatrixType >::Flags &RowMajorBit > > X, Ref< Matrix< typename MatrixType::Scalar, Dynamic, Dynamic, traits< MatrixType >::Flags &RowMajorBit > > Y)
template<typename MatrixType , typename BidiagType >
void upperbidiagonalization_inplace_blocked (MatrixType &A, BidiagType &bidiagonal, Index maxBlockSize=32, typename MatrixType::Scalar *=0)

Variables

static Packet4ui p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_ZERO_)
static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8)
static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8)
static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0)
static Packet4f p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1)
static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }
static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 }
static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }
static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 }
static Packet16uc p16uc_FORWARD = p16uc_REVERSE32
static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }
static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8)
static Packet16uc p16uc_PSET32_WEVEN = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8)
static Packet16uc p16uc_HALF64_0_16 = vec_sld(vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 0), (Packet16uc)p4i_ZERO, 8)
static Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN)
static Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN)
static Packet16uc p16uc_TRANSPOSE64_HI = vec_add(p16uc_PSET64_HI, p16uc_HALF64_0_16)
static Packet16uc p16uc_TRANSPOSE64_LO = vec_add(p16uc_PSET64_LO, p16uc_HALF64_0_16)
static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8)
static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_PSET64_HI, p16uc_PSET64_LO, 8)
static uint32x2_t p2ui_CONJ_XOR = EIGEN_INIT_NEON_PACKET2(0x00000000, 0x80000000)
const std::ptrdiff_t defaultL1CacheSize = 16*1024
const std::ptrdiff_t defaultL2CacheSize = 512*1024
const std::ptrdiff_t defaultL3CacheSize = 512*1024

Typedef Documentation

typedef __vector unsigned char Eigen::internal::Packet16uc

Definition at line 39 of file AltiVec/PacketMath.h.

typedef __m128d Eigen::internal::Packet2d

Definition at line 57 of file SSE/PacketMath.h.

typedef float32x2_t Eigen::internal::Packet2f

Definition at line 37 of file NEON/PacketMath.h.

typedef int32x2_t Eigen::internal::Packet2i

Definition at line 40 of file NEON/PacketMath.h.

typedef __vector __bool int Eigen::internal::Packet4bi

Definition at line 37 of file AltiVec/PacketMath.h.

typedef __m256d Eigen::internal::Packet4d

Definition at line 33 of file AVX/PacketMath.h.

typedef __m128 Eigen::internal::Packet4f

Definition at line 34 of file AltiVec/PacketMath.h.

typedef __m128i Eigen::internal::Packet4i

Definition at line 35 of file AltiVec/PacketMath.h.

typedef uint32x4_t Eigen::internal::Packet4ui

Definition at line 36 of file AltiVec/PacketMath.h.

typedef __m256 Eigen::internal::Packet8f

Definition at line 31 of file AVX/PacketMath.h.

typedef __m256i Eigen::internal::Packet8i

Definition at line 38 of file AltiVec/PacketMath.h.


Enumeration Type Documentation

anonymous enum
Enumerator:
SDP_AsScalarProduct 
SDP_AsCwiseProduct 

Definition at line 29 of file SparseDiagonalProduct.h.

anonymous enum
Enumerator:
SVA_RuntimeSwitch 
SVA_Inner 
SVA_Outer 

Definition at line 49 of file SparseVector.h.

anonymous enum
Enumerator:
LUNoMarker 

Definition at line 37 of file SparseLU_Memory.h.

{ LUNoMarker = 3 };
anonymous enum
Enumerator:
emptyIdxLU 

Definition at line 38 of file SparseLU_Memory.h.

{emptyIdxLU = -1};
anonymous enum
Enumerator:
PreconditionIfMoreColsThanRows 
PreconditionIfMoreRowsThanCols 

Definition at line 30 of file JacobiSVD.h.

Constants for comparison functors

Enumerator:
cmp_EQ 
cmp_LT 
cmp_LE 
cmp_UNORD 
cmp_NEQ 
cmp_GT 
cmp_GE 

Definition at line 533 of file Constants.h.

                    {
  cmp_EQ = 0,
  cmp_LT = 1,
  cmp_LE = 2,
  cmp_UNORD = 3,
  cmp_NEQ = 4,
  cmp_GT = 5,
  cmp_GE = 6
};
Enumerator:
LUSUP 
UCOL 
LSUB 
USUB 
LLVL 
ULVL 

Definition at line 74 of file SparseLU_Structs.h.

Enumerator:
PermPermProduct 

Definition at line 18 of file PermutationMatrix.h.

Enumerator:
PositiveSemiDef 
NegativeSemiDef 
ZeroSign 
Indefinite 

Definition at line 22 of file LDLT.h.


Function Documentation

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4f ( ZERO  ,
 
) [static]
static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( ZERO  ,
 
) [static]
static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( ONE  ,
 
) [static]
static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( MINUS16  ,
16 
) [static]
static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( MINUS1  ,
1 
) [static]
template<typename T >
EIGEN_DEVICE_FUNC void Eigen::internal::aligned_delete ( T *  ptr,
size_t  size 
) [inline]

Deletes objects constructed with aligned_new. The size parameter tells how many objects the destructor of T must be called on.

Definition at line 328 of file Memory.h.

{
  destruct_elements_of_array<T>(ptr, size);
  aligned_free(ptr);
}
EIGEN_DEVICE_FUNC void Eigen::internal::aligned_free ( void *  ptr) [inline]

Frees memory allocated with aligned_malloc.

Definition at line 174 of file Memory.h.

{
  #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
    std::free(ptr);
  #else
    handmade_aligned_free(ptr);
  #endif
}
EIGEN_DEVICE_FUNC void* Eigen::internal::aligned_malloc ( size_t  size) [inline]

Allocates size bytes. The returned pointer is guaranteed to have 16 or 32 bytes alignment depending on the requirements. On allocation error, a std::bad_alloc exception is thrown.

Definition at line 153 of file Memory.h.

{
  check_that_malloc_is_allowed();

  void *result;
  #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
    result = std::malloc(size);
    #if EIGEN_DEFAULT_ALIGN_BYTES==16
    eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fallback to handmade alignd memory allocator.");
    #endif
  #else
    result = handmade_aligned_malloc(size);
  #endif

  if(!result && size)
    throw_std_bad_alloc();

  return result;
}
template<typename T >
EIGEN_DEVICE_FUNC T* Eigen::internal::aligned_new ( size_t  size) [inline]

Allocates size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment. On allocation error, a std::bad_alloc exception is thrown. The default constructor of T is called.

Definition at line 295 of file Memory.h.

{
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
  EIGEN_TRY
  {
    return construct_elements_of_array(result, size);
  }
  EIGEN_CATCH(...)
  {
    aligned_free(result);
    EIGEN_THROW;
  }
}
void* Eigen::internal::aligned_realloc ( void *  ptr,
size_t  new_size,
size_t  old_size 
) [inline]

Reallocates an aligned block of memory.

Exceptions:
std::bad_alloc — on allocation failure

Definition at line 188 of file Memory.h.

{
  EIGEN_UNUSED_VARIABLE(old_size);

  void *result;
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
  result = std::realloc(ptr,new_size);
#else
  result = handmade_aligned_realloc(ptr,new_size,old_size);
#endif

  if (!result && new_size)
    throw_std_bad_alloc();

  return result;
}
template<typename T >
T Eigen::internal::amd_flip ( const T &  i) [inline]

Definition at line 38 of file Amd.h.

{ return -i-2; }
template<typename T0 , typename T1 >
void Eigen::internal::amd_mark ( const T0 *  w,
const T1 &  j 
) [inline]

Definition at line 41 of file Amd.h.

{ return w[j] = amd_flip(w[j]); }
template<typename T0 , typename T1 >
bool Eigen::internal::amd_marked ( const T0 *  w,
const T1 &  j 
) [inline]

Definition at line 40 of file Amd.h.

{ return w[j]<0; }
template<typename T >
T Eigen::internal::amd_unflip ( const T &  i) [inline]

Definition at line 39 of file Amd.h.

{ return i<0 ? amd_flip(i) : i; }
template<typename MatrixType , typename VectorsType , typename CoeffsType >
void Eigen::internal::apply_block_householder_on_the_left ( MatrixType &  mat,
const VectorsType &  vectors,
const CoeffsType &  hCoeffs,
bool  forward 
)

If forward, then perform mat = H0 * H1 * H2 * mat; otherwise perform mat = H2 * H1 * H0 * mat.

Definition at line 79 of file BlockHouseholder.h.

{
  enum { TFactorSize = MatrixType::ColsAtCompileTime };
  Index nbVecs = vectors.cols();
  Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, RowMajor> T(nbVecs,nbVecs);
  
  if(forward) make_block_householder_triangular_factor(T, vectors, hCoeffs);
  else        make_block_householder_triangular_factor(T, vectors, hCoeffs.conjugate());  
  const TriangularView<const VectorsType, UnitLower> V(vectors);

  // A -= V T V^* A
  Matrix<typename MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime,0,
         VectorsType::MaxColsAtCompileTime,MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat;
  // FIXME add .noalias() once the triangular product can work inplace
  if(forward) tmp = T.template triangularView<Upper>()           * tmp;
  else        tmp = T.template triangularView<Upper>().adjoint() * tmp;
  mat.noalias() -= V * tmp;
}
template<typename VectorX , typename VectorY , typename OtherScalar >
void Eigen::internal::apply_rotation_in_the_plane ( DenseBase< VectorX > &  xpr_x,
DenseBase< VectorY > &  xpr_y,
const JacobiRotation< OtherScalar > &  j 
)

Applies the clockwise 2D rotation j to the set of 2D vectors of coordinates x and y: $ \left( \begin{array}{c} x \\ y \end{array} \right) = J \left( \begin{array}{c} x \\ y \end{array} \right) $

See also:
MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()

Definition at line 301 of file Jacobi.h.

{
  typedef typename VectorX::Scalar Scalar;
  enum { PacketSize = packet_traits<Scalar>::size };
  typedef typename packet_traits<Scalar>::type Packet;
  eigen_assert(xpr_x.size() == xpr_y.size());
  Index size = xpr_x.size();
  Index incrx = xpr_x.derived().innerStride();
  Index incry = xpr_y.derived().innerStride();

  Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0);
  Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0);
  
  OtherScalar c = j.c();
  OtherScalar s = j.s();
  if (c==OtherScalar(1) && s==OtherScalar(0))
    return;

  /*** dynamic-size vectorized paths ***/

  if(VectorX::SizeAtCompileTime == Dynamic &&
    (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
    ((incrx==1 && incry==1) || PacketSize == 1))
  {
    // both vectors are sequentially stored in memory => vectorization
    enum { Peeling = 2 };

    Index alignedStart = internal::first_default_aligned(y, size);
    Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;

    const Packet pc = pset1<Packet>(c);
    const Packet ps = pset1<Packet>(s);
    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;

    for(Index i=0; i<alignedStart; ++i)
    {
      Scalar xi = x[i];
      Scalar yi = y[i];
      x[i] =  c * xi + numext::conj(s) * yi;
      y[i] = -s * xi + numext::conj(c) * yi;
    }

    Scalar* EIGEN_RESTRICT px = x + alignedStart;
    Scalar* EIGEN_RESTRICT py = y + alignedStart;

    if(internal::first_default_aligned(x, size)==alignedStart)
    {
      for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)
      {
        Packet xi = pload<Packet>(px);
        Packet yi = pload<Packet>(py);
        pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
        pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
        px += PacketSize;
        py += PacketSize;
      }
    }
    else
    {
      Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);
      for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)
      {
        Packet xi   = ploadu<Packet>(px);
        Packet xi1  = ploadu<Packet>(px+PacketSize);
        Packet yi   = pload <Packet>(py);
        Packet yi1  = pload <Packet>(py+PacketSize);
        pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
        pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1)));
        pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
        pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1)));
        px += Peeling*PacketSize;
        py += Peeling*PacketSize;
      }
      if(alignedEnd!=peelingEnd)
      {
        Packet xi = ploadu<Packet>(x+peelingEnd);
        Packet yi = pload <Packet>(y+peelingEnd);
        pstoreu(x+peelingEnd, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
        pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
      }
    }

    for(Index i=alignedEnd; i<size; ++i)
    {
      Scalar xi = x[i];
      Scalar yi = y[i];
      x[i] =  c * xi + numext::conj(s) * yi;
      y[i] = -s * xi + numext::conj(c) * yi;
    }
  }

  /*** fixed-size vectorized path ***/
  else if(VectorX::SizeAtCompileTime != Dynamic &&
          (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&
          (EIGEN_PLAIN_ENUM_MIN(evaluator<VectorX>::Alignment, evaluator<VectorY>::Alignment)>0)) // FIXME should be compared to the required alignment
  {
    const Packet pc = pset1<Packet>(c);
    const Packet ps = pset1<Packet>(s);
    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;
    Scalar* EIGEN_RESTRICT px = x;
    Scalar* EIGEN_RESTRICT py = y;
    for(Index i=0; i<size; i+=PacketSize)
    {
      Packet xi = pload<Packet>(px);
      Packet yi = pload<Packet>(py);
      pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));
      pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));
      px += PacketSize;
      py += PacketSize;
    }
  }

  /*** non-vectorized path ***/
  else
  {
    for(Index i=0; i<size; ++i)
    {
      Scalar xi = *x;
      Scalar yi = *y;
      *x =  c * xi + numext::conj(s) * yi;
      *y = -s * xi + numext::conj(c) * yi;
      x += incrx;
      y += incry;
    }
  }
}
template<typename DstXprType , typename SrcXprType >
void Eigen::internal::assign_sparse_to_sparse ( DstXprType &  dst,
const SrcXprType &  src 
)

Definition at line 71 of file SparseAssign.h.

{
  typedef typename DstXprType::Scalar Scalar;
  typedef internal::evaluator<DstXprType> DstEvaluatorType;
  typedef internal::evaluator<SrcXprType> SrcEvaluatorType;

  SrcEvaluatorType srcEvaluator(src);

  const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
  const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
  if ((!transpose) && src.isRValue())
  {
    // eval without temporary
    dst.resize(src.rows(), src.cols());
    dst.setZero();
    dst.reserve((std::max)(src.rows(),src.cols())*2);
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      dst.startVec(j);
      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
      {
        Scalar v = it.value();
        dst.insertBackByOuterInner(j,it.index()) = v;
      }
    }
    dst.finalize();
  }
  else
  {
    // eval through a temporary
    eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
              (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
              "the transpose operation is supposed to be handled in SparseMatrix::operator=");

    enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };

    
    DstXprType temp(src.rows(), src.cols());

    temp.reserve((std::max)(src.rows(),src.cols())*2);
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      temp.startVec(j);
      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
      {
        Scalar v = it.value();
        temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
      }
    }
    temp.finalize();

    dst = temp.markAsRValue();
  }
}
template<typename MatrixType >
SluMatrix Eigen::internal::asSluMatrix ( MatrixType &  mat)

Definition at line 265 of file SuperLUSupport.h.

{
  return SluMatrix::Map(mat);
}
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
bool Eigen::internal::bicgstab ( const MatrixType &  mat,
const Rhs &  rhs,
Dest &  x,
const Preconditioner &  precond,
Index &  iters,
typename Dest::RealScalar &  tol_error 
)

Low-level bi conjugate gradient stabilized algorithm

Parameters:
mat  The matrix A
rhs  The right hand side vector b
x  On input an initial solution, on output the computed solution.
precond  A preconditioner being able to efficiently solve for an approximation of Ax=b (regardless of b)
iters  On input the max number of iterations, on output the number of performed iterations.
tol_error  On input the tolerance error, on output an estimation of the relative error.
Returns:
false in the case of numerical issue, for example a break down of BiCGSTAB.

Definition at line 29 of file BiCGSTAB.h.

{
  using std::sqrt;
  using std::abs;
  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix<Scalar,Dynamic,1> VectorType;
  RealScalar tol = tol_error;
  Index maxIters = iters;

  Index n = mat.cols();
  VectorType r  = rhs - mat * x;
  VectorType r0 = r;
  
  RealScalar r0_sqnorm = r0.squaredNorm();
  RealScalar rhs_sqnorm = rhs.squaredNorm();
  if(rhs_sqnorm == 0)
  {
    x.setZero();
    return true;
  }
  Scalar rho    = 1;
  Scalar alpha  = 1;
  Scalar w      = 1;
  
  VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
  VectorType y(n),  z(n);
  VectorType kt(n), ks(n);

  VectorType s(n), t(n);

  RealScalar tol2 = tol*tol*rhs_sqnorm;
  RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
  Index i = 0;
  Index restarts = 0;

  while ( r.squaredNorm() > tol2 && i<maxIters )
  {
    Scalar rho_old = rho;

    rho = r0.dot(r);
    if (abs(rho) < eps2*r0_sqnorm)
    {
      // The new residual vector became too orthogonal to the arbitrarily chosen direction r0
      // Let's restart with a new r0:
      r  = rhs - mat * x;
      r0 = r;
      rho = r0_sqnorm = r.squaredNorm();
      if(restarts++ == 0)
        i = 0;
    }
    Scalar beta = (rho/rho_old) * (alpha / w);
    p = r + beta * (p - w * v);
    
    y = precond.solve(p);
    
    v.noalias() = mat * y;

    alpha = rho / r0.dot(v);
    s = r - alpha * v;

    z = precond.solve(s);
    t.noalias() = mat * z;

    RealScalar tmp = t.squaredNorm();
    if(tmp>RealScalar(0))
      w = t.dot(s) / tmp;
    else
      w = Scalar(0);
    x += alpha * y + w * z;
    r = s - w * t;
    ++i;
  }
  tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
  iters = i;
  return true; 
}
template<typename Derived >
NumTraits<typename traits<Derived>::Scalar>::Real Eigen::internal::blueNorm_impl ( const EigenBase< Derived > &  _vec) [inline]

Definition at line 55 of file StableNorm.h.

{
  typedef typename Derived::RealScalar RealScalar;  
  using std::pow;
  using std::sqrt;
  using std::abs;
  const Derived& vec(_vec.derived());
  static bool initialized = false;
  static RealScalar b1, b2, s1m, s2m, rbig, relerr;
  if(!initialized)
  {
    int ibeta, it, iemin, iemax, iexp;
    RealScalar eps;
    // This program calculates the machine-dependent constants
    // bl, b2, slm, s2m, relerr overfl
    // from the "basic" machine-dependent numbers
    // nbig, ibeta, it, iemin, iemax, rbig.
    // The following define the basic machine-dependent constants.
    // For portability, the PORT subprograms "ilmaeh" and "rlmach"
    // are used. For any specific computer, each of the assignment
    // statements can be replaced
    ibeta = std::numeric_limits<RealScalar>::radix;                 // base for floating-point numbers
    it    = std::numeric_limits<RealScalar>::digits;                // number of base-beta digits in mantissa
    iemin = std::numeric_limits<RealScalar>::min_exponent;          // minimum exponent
    iemax = std::numeric_limits<RealScalar>::max_exponent;          // maximum exponent
    rbig  = (std::numeric_limits<RealScalar>::max)();               // largest floating-point number

    iexp  = -((1-iemin)/2);
    b1    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // lower boundary of midrange
    iexp  = (iemax + 1 - it)/2;
    b2    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // upper boundary of midrange

    iexp  = (2-iemin)/2;
    s1m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // scaling factor for lower range
    iexp  = - ((iemax+it)/2);
    s2m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // scaling factor for upper range

    eps     = RealScalar(pow(double(ibeta), 1-it));
    relerr  = sqrt(eps);                                            // tolerance for neglecting asml
    initialized = true;
  }
  Index n = vec.size();
  RealScalar ab2 = b2 / RealScalar(n);
  RealScalar asml = RealScalar(0);
  RealScalar amed = RealScalar(0);
  RealScalar abig = RealScalar(0);
  for(typename Derived::InnerIterator it(vec, 0); it; ++it)
  {
    RealScalar ax = abs(it.value());
    if(ax > ab2)     abig += numext::abs2(ax*s2m);
    else if(ax < b1) asml += numext::abs2(ax*s1m);
    else             amed += numext::abs2(ax);
  }
  if(amed!=amed)
    return amed;  // we got a NaN
  if(abig > RealScalar(0))
  {
    abig = sqrt(abig);
    if(abig > rbig) // overflow, or *this contains INF values
      return abig;  // return INF
    if(amed > RealScalar(0))
    {
      abig = abig/s2m;
      amed = sqrt(amed);
    }
    else
      return abig/s2m;
  }
  else if(asml > RealScalar(0))
  {
    if (amed > RealScalar(0))
    {
      abig = sqrt(amed);
      amed = sqrt(asml) / s1m;
    }
    else
      return sqrt(asml)/s1m;
  }
  else
    return sqrt(amed);
  asml = numext::mini(abig, amed);
  abig = numext::maxi(abig, amed);
  if(asml <= abig*relerr)
    return abig;
  else
    return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig));
}
template<typename Derived >
const Derived::Scalar Eigen::internal::bruteforce_det3_helper ( const MatrixBase< Derived > &  matrix,
int  a,
int  b,
int  c 
) [inline]

Definition at line 19 of file Determinant.h.

{
  return matrix.coeff(0,a)
         * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
}
template<typename Derived >
const Derived::Scalar Eigen::internal::bruteforce_det4_helper ( const MatrixBase< Derived > &  matrix,
int  j,
int  k,
int  m,
int  n 
)

Definition at line 27 of file Determinant.h.

{
  return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
       * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
}
template<typename MatrixType >
void Eigen::internal::c_to_fortran_numbering ( MatrixType &  mat)

Definition at line 97 of file PaStiXSupport.h.

  {
    if ( !(mat.outerIndexPtr()[0]) ) 
    { 
      int i;
      for(i = 0; i <= mat.rows(); ++i)
        ++mat.outerIndexPtr()[i];
      for(i = 0; i < mat.nonZeros(); ++i)
        ++mat.innerIndexPtr()[i];
    }
  }
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( Dst &  dst,
const Src &  src 
)

Definition at line 692 of file AssignEvaluator.h.

{
  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar>());
}
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( const Dst &  dst,
const Src &  src 
)

Definition at line 698 of file AssignEvaluator.h.

{
  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar>());
}
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( Dst &  dst,
const Src &  src,
const Func &  func,
typename enable_if< evaluator_assume_aliasing< Src >::value, void * >::type  = 0 
)

Definition at line 706 of file AssignEvaluator.h.

{
  typename plain_matrix_type<Src>::type tmp(src);
  call_assignment_no_alias(dst, tmp, func);
}
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( Dst &  dst,
const Src &  src,
const Func &  func,
typename enable_if<!evaluator_assume_aliasing< Src >::value, void * >::type  = 0 
)

Definition at line 714 of file AssignEvaluator.h.

{
  call_assignment_no_alias(dst, src, func);
}
template<typename Dst , template< typename > class StorageBase, typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( NoAlias< Dst, StorageBase > &  dst,
const Src &  src,
const Func &  func 
)

Definition at line 723 of file AssignEvaluator.h.

{
  call_assignment_no_alias(dst.expression(), src, func);
}
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment_no_alias ( Dst &  dst,
const Src &  src,
const Func &  func 
)

Definition at line 731 of file AssignEvaluator.h.

{
  enum {
    NeedToTranspose = (    (int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1)
                        || (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)
                      ) && int(Dst::SizeAtCompileTime) != 1
  };

  Index dstRows = NeedToTranspose ? src.cols() : src.rows();
  Index dstCols = NeedToTranspose ? src.rows() : src.cols();
  if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
    dst.resize(dstRows, dstCols);
  
  typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst>::type ActualDstTypeCleaned;
  typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst&>::type ActualDstType;
  ActualDstType actualDst(dst);
  
  // TODO check whether this is the right place to perform these checks:
  EIGEN_STATIC_ASSERT_LVALUE(Dst)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned,Src)
  EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar);
  
  Assignment<ActualDstTypeCleaned,Src,Func>::run(actualDst, src, func);
}
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment_no_alias ( Dst &  dst,
const Src &  src 
)

Definition at line 757 of file AssignEvaluator.h.

{
  call_assignment_no_alias(dst, src, internal::assign_op<typename Dst::Scalar>());
}
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment_no_alias_no_transpose ( Dst &  dst,
const Src &  src,
const Func &  func 
)

Definition at line 764 of file AssignEvaluator.h.

{
  Index dstRows = src.rows();
  Index dstCols = src.cols();
  if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
    dst.resize(dstRows, dstCols);
  
  // TODO check whether this is the right place to perform these checks:
  EIGEN_STATIC_ASSERT_LVALUE(Dst)
  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src)
  
  Assignment<Dst,Src,Func>::run(dst, src, func);
}
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment_no_alias_no_transpose ( Dst &  dst,
const Src &  src 
)

Definition at line 779 of file AssignEvaluator.h.

{
  call_assignment_no_alias_no_transpose(dst, src, internal::assign_op<typename Dst::Scalar>());
}
template<typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_dense_assignment_loop ( const DstXprType &  dst,
const SrcXprType &  src,
const Functor &  func 
)

Definition at line 640 of file AssignEvaluator.h.

{
  eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
  
  typedef evaluator<DstXprType> DstEvaluatorType;
  typedef evaluator<SrcXprType> SrcEvaluatorType;

  DstEvaluatorType dstEvaluator(dst);
  SrcEvaluatorType srcEvaluator(src);
    
  typedef generic_dense_assignment_kernel<DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
  Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
  
  dense_assignment_loop<Kernel>::run(kernel);
}
template<typename DstXprType , typename SrcXprType >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_dense_assignment_loop ( const DstXprType &  dst,
const SrcXprType &  src 
)

Definition at line 657 of file AssignEvaluator.h.

{
  call_dense_assignment_loop(dst, src, internal::assign_op<typename DstXprType::Scalar>());
}
template<int Mode, bool SetOpposite, typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_triangular_assignment_loop ( const DstXprType &  dst,
const SrcXprType &  src,
const Functor &  func 
)

Definition at line 780 of file TriangularMatrix.h.

{
  eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
  
  typedef evaluator<DstXprType> DstEvaluatorType;
  typedef evaluator<SrcXprType> SrcEvaluatorType;

  DstEvaluatorType dstEvaluator(dst);
  SrcEvaluatorType srcEvaluator(src);
    
  typedef triangular_dense_assignment_kernel< Mode&(Lower|Upper),Mode&(UnitDiag|ZeroDiag|SelfAdjoint),SetOpposite,
                                              DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
  Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
  
  enum {
      unroll = DstXprType::SizeAtCompileTime != Dynamic
            && SrcEvaluatorType::CoeffReadCost < HugeCost
            && DstXprType::SizeAtCompileTime * SrcEvaluatorType::CoeffReadCost / 2 <= EIGEN_UNROLLING_LIMIT
    };
  
  triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel);
}
template<int Mode, bool SetOpposite, typename DstXprType , typename SrcXprType >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_triangular_assignment_loop ( const DstXprType &  dst,
const SrcXprType &  src 
)

Definition at line 805 of file TriangularMatrix.h.

{
  call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar>());
}
static void Eigen::internal::check_DenseIndex_is_signed ( ) [inline, static]

Definition at line 20 of file DenseBase.h.

                                                {
  EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); 
}
template<typename Dst , typename Src >
void Eigen::internal::check_for_aliasing ( const Dst &  dst,
const Src &  src 
)

Definition at line 387 of file Transpose.h.

{
  internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src);
}
template<typename T >
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void Eigen::internal::check_size_for_overflow ( size_t  size)

Definition at line 285 of file Memory.h.

{
  if(size > size_t(-1) / sizeof(T))
    throw_std_bad_alloc();
}
template<typename T , int Size>
EIGEN_DEVICE_FUNC void Eigen::internal::check_static_allocation_size ( )

Definition at line 29 of file DenseStorage.h.

{
  // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
  #if EIGEN_STACK_ALLOCATION_LIMIT
  EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
  #endif
}
EIGEN_DEVICE_FUNC void Eigen::internal::check_that_malloc_is_allowed ( ) [inline]

Definition at line 146 of file Memory.h.

{}
template<typename Scalar , typename CholmodType >
void Eigen::internal::cholmod_configure_matrix ( CholmodType &  mat)

Definition at line 18 of file CholmodSupport.h.

{
  if (internal::is_same<Scalar,float>::value)
  {
    mat.xtype = CHOLMOD_REAL;
    mat.dtype = CHOLMOD_SINGLE;
  }
  else if (internal::is_same<Scalar,double>::value)
  {
    mat.xtype = CHOLMOD_REAL;
    mat.dtype = CHOLMOD_DOUBLE;
  }
  else if (internal::is_same<Scalar,std::complex<float> >::value)
  {
    mat.xtype = CHOLMOD_COMPLEX;
    mat.dtype = CHOLMOD_SINGLE;
  }
  else if (internal::is_same<Scalar,std::complex<double> >::value)
  {
    mat.xtype = CHOLMOD_COMPLEX;
    mat.dtype = CHOLMOD_DOUBLE;
  }
  else
  {
    eigen_assert(false && "Scalar type not supported by CHOLMOD");
  }
}
template<typename IndexType >
static IndexType Eigen::internal::clear_mark ( IndexType  n_row,
Colamd_Row< IndexType >  Row[] 
) [inline, static]

Definition at line 1821 of file Ordering.h.

template<typename MatrixType , int i, int j>
EIGEN_DEVICE_FUNC MatrixType::Scalar Eigen::internal::cofactor_3x3 ( const MatrixType &  m) [inline]

Definition at line 126 of file InverseImpl.h.

{
  enum {
    i1 = (i+1) % 3,
    i2 = (i+2) % 3,
    j1 = (j+1) % 3,
    j2 = (j+2) % 3
  };
  return m.coeff(i1, j1) * m.coeff(i2, j2)
       - m.coeff(i1, j2) * m.coeff(i2, j1);
}
template<typename MatrixType , int i, int j>
EIGEN_DEVICE_FUNC MatrixType::Scalar Eigen::internal::cofactor_4x4 ( const MatrixType &  matrix) [inline]

Definition at line 213 of file InverseImpl.h.

{
  enum {
    i1 = (i+1) % 4,
    i2 = (i+2) % 4,
    i3 = (i+3) % 4,
    j1 = (j+1) % 4,
    j2 = (j+2) % 4,
    j3 = (j+3) % 4
  };
  return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3)
       + general_det3_helper(matrix, i2, i3, i1, j1, j2, j3)
       + general_det3_helper(matrix, i3, i1, i2, j1, j2, j3);
}
template<typename IndexType >
static bool Eigen::internal::colamd ( IndexType  n_row,
IndexType  n_col,
IndexType  Alen,
IndexType *  A,
IndexType *  p,
double  knobs[COLAMD_KNOBS],
IndexType  stats[COLAMD_STATS] 
) [static]

Computes a column ordering using the column approximate minimum degree ordering.

Computes a column ordering (Q) of A such that P(AQ)=LU or (AQ)'AQ=LL' have less fill-in and require fewer floating point operations than factorizing the unpermuted matrix A or A'A, respectively.

Parameters:
n_row — number of rows in A
n_col — number of columns in A
Alen — size of the array A
A — row indices of the matrix, of size Alen
p — column pointers of A, of size n_col+1
knobs — parameter settings for colamd
stats — colamd output statistics and error codes

Definition at line 323 of file Ordering.h.

template<typename IndexType >
IndexType Eigen::internal::colamd_c ( IndexType  n_col) [inline]

Definition at line 203 of file Ordering.h.

template<typename IndexType >
IndexType Eigen::internal::colamd_r ( IndexType  n_row) [inline]

Definition at line 207 of file Ordering.h.

template<typename IndexType >
IndexType Eigen::internal::colamd_recommended ( IndexType  nnz,
IndexType  n_row,
IndexType  n_col 
) [inline]

Returns the recommended value of Alen.

Returns recommended value of Alen for use by colamd. Returns -1 if any input argument is negative. The use of this routine or macro is optional. Note that the macro uses its arguments more than once, so be careful for side effects, if you pass expressions as arguments to COLAMD_RECOMMENDED.

Parameters:
nnz — number of nonzeros in A
n_row — number of rows in A
n_col — number of columns in A
Returns:
recommended value of Alen for use by colamd

Definition at line 258 of file Ordering.h.

static void Eigen::internal::colamd_set_defaults ( double  knobs[COLAMD_KNOBS]) [inline, static]

Set default parameters. The use of this routine is optional.

Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col) entries are removed prior to ordering. Columns with more than (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to ordering, and placed last in the output column ordering.

COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1, respectively, in colamd.h. Default values of these two knobs are both 0.5. Currently, only knobs [0] and knobs [1] are used, but future versions may use more knobs. If so, they will be properly set to their defaults by the future version of colamd_set_defaults, so that the code that calls colamd will not need to change, assuming that you either use colamd_set_defaults, or pass a (double *) NULL pointer as the knobs array to colamd or symamd.

Parameters:
knobs — parameter settings for colamd

Definition at line 287 of file Ordering.h.

template<typename MatrixType , typename IndexVector >
int Eigen::internal::coletree ( const MatrixType &  mat,
IndexVector &  parent,
IndexVector &  firstRowElt,
typename MatrixType::StorageIndex *  perm = 0 
)

Compute the column elimination tree of a sparse matrix

Parameters:
mat — The matrix in column-major format.
parent — The elimination tree
firstRowElt — The column index of the first element in each row
perm — The permutation to apply to the columns of mat

Definition at line 61 of file SparseColEtree.h.

{
  typedef typename MatrixType::StorageIndex StorageIndex;
  StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns
  StorageIndex m = convert_index<StorageIndex>(mat.rows());
  StorageIndex diagSize = (std::min)(nc,m);
  IndexVector root(nc); // root of subtree of etree 
  root.setZero();
  IndexVector pp(nc); // disjoint sets 
  pp.setZero(); // Initialize disjoint sets 
  parent.resize(mat.cols());
  //Compute first nonzero column in each row 
  firstRowElt.resize(m);
  firstRowElt.setConstant(nc);
  firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
  bool found_diag;
  for (StorageIndex col = 0; col < nc; col++)
  {
    StorageIndex pcol = col;
    if(perm) pcol  = perm[col];
    for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
    { 
      Index row = it.row();
      firstRowElt(row) = (std::min)(firstRowElt(row), col);
    }
  }
  /* Compute etree by Liu's algorithm for symmetric matrices,
          except use (firstRowElt[r],c) in place of an edge (r,c) of A.
    Thus each row clique in A'*A is replaced by a star
    centered at its first vertex, which has the same fill. */
  StorageIndex rset, cset, rroot;
  for (StorageIndex col = 0; col < nc; col++) 
  {
    found_diag = col>=m;
    pp(col) = col; 
    cset = col; 
    root(cset) = col; 
    parent(col) = nc; 
    /* The diagonal element is treated here even if it does not exist in the matrix
     * hence the loop is executed once more */ 
    StorageIndex pcol = col;
    if(perm) pcol  = perm[col];
    for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
    { //  A sequence of interleaved find and union is performed 
      Index i = col;
      if(it) i = it.index();
      if (i == col) found_diag = true;
      
      StorageIndex row = firstRowElt(i);
      if (row >= col) continue; 
      rset = internal::etree_find(row, pp); // Find the name of the set containing row
      rroot = root(rset);
      if (rroot != col) 
      {
        parent(rroot) = col; 
        pp(cset) = rset; 
        cset = rset; 
        root(cset) = col; 
      }
    }
  }
  return 0;  
}
template<typename MatrixType , typename ResultType >
EIGEN_DEVICE_FUNC void Eigen::internal::compute_inverse_size2_helper ( const MatrixType &  matrix,
const typename ResultType::Scalar &  invdet,
ResultType &  result 
) [inline]

Definition at line 76 of file InverseImpl.h.

{
  result.coeffRef(0,0) =  matrix.coeff(1,1) * invdet;
  result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
  result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
  result.coeffRef(1,1) =  matrix.coeff(0,0) * invdet;
}
template<typename MatrixType , typename ResultType >
EIGEN_DEVICE_FUNC void Eigen::internal::compute_inverse_size3_helper ( const MatrixType &  matrix,
const typename ResultType::Scalar &  invdet,
const Matrix< typename ResultType::Scalar, 3, 1 > &  cofactors_col0,
ResultType &  result 
) [inline]

Definition at line 140 of file InverseImpl.h.

{
  result.row(0) = cofactors_col0 * invdet;
  result.coeffRef(1,0) =  cofactor_3x3<MatrixType,0,1>(matrix) * invdet;
  result.coeffRef(1,1) =  cofactor_3x3<MatrixType,1,1>(matrix) * invdet;
  result.coeffRef(1,2) =  cofactor_3x3<MatrixType,2,1>(matrix) * invdet;
  result.coeffRef(2,0) =  cofactor_3x3<MatrixType,0,2>(matrix) * invdet;
  result.coeffRef(2,1) =  cofactor_3x3<MatrixType,1,2>(matrix) * invdet;
  result.coeffRef(2,2) =  cofactor_3x3<MatrixType,2,2>(matrix) * invdet;
}
template<typename MatrixType , typename DiagType , typename SubDiagType >
ComputationInfo Eigen::internal::computeFromTridiagonal_impl ( DiagType &  diag,
SubDiagType &  subdiag,
const Index  maxIterations,
bool  computeEigenvectors,
MatrixType &  eivec 
)

Compute the eigendecomposition from a tridiagonal matrix.

Parameters:
[in,out] diag — On input, the diagonal of the matrix; on output, the eigenvalues
[in,out] subdiag — The subdiagonal part of the matrix (entries are modified during the decomposition)
[in] maxIterations — the maximum number of iterations
[in] computeEigenvectors — whether the eigenvectors have to be computed or not
[out] eivec — The matrix to store the eigenvectors if computeEigenvectors==true. Must be allocated on input.
Returns:
Success or NoConvergence

Definition at line 481 of file SelfAdjointEigenSolver.h.

{
  using std::abs;

  ComputationInfo info;
  typedef typename MatrixType::Scalar Scalar;

  Index n = diag.size();
  Index end = n-1;
  Index start = 0;
  Index iter = 0; // total number of iterations
  
  typedef typename DiagType::RealScalar RealScalar;
  const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
  
  while (end>0)
  {
    for (Index i = start; i<end; ++i)
      if (internal::isMuchSmallerThan(abs(subdiag[i]),(abs(diag[i])+abs(diag[i+1]))) || abs(subdiag[i]) <= considerAsZero)
        subdiag[i] = 0;

    // find the largest unreduced block
    while (end>0 && subdiag[end-1]==0)
    {
      end--;
    }
    if (end<=0)
      break;

    // if we spent too many iterations, we give up
    iter++;
    if(iter > maxIterations * n) break;

    start = end - 1;
    while (start>0 && subdiag[start-1]!=0)
      start--;

    internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);
  }
  if (iter <= maxIterations * n)
    info = Success;
  else
    info = NoConvergence;

  // Sort eigenvalues and corresponding vectors.
  // TODO make the sort optional ?
  // TODO use a better sort algorithm !!
  if (info == Success)
  {
    for (Index i = 0; i < n-1; ++i)
    {
      Index k;
      diag.segment(i,n-i).minCoeff(&k);
      if (k > 0)
      {
        std::swap(diag[i], diag[k+i]);
        if(computeEigenvectors)
          eivec.col(i).swap(eivec.col(k+i));
      }
    }
  }
  return info;
}
template<typename LhsScalar , typename RhsScalar , int KcFactor>
void Eigen::internal::computeProductBlockingSizes ( Index &  k,
Index &  m,
Index &  n,
Index  num_threads = 1 
)

Computes the blocking parameters for a m x k times k x n matrix product.

Parameters:
[in,out] k — Input: the third dimension of the product. Output: the blocking size along the same dimension.
[in,out] m — Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
[in,out] n — Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.

Given a m x k times k x n matrix product of scalar types LhsScalar and RhsScalar, this function computes the blocking size parameters along the respective dimensions for matrix products and related algorithms.

The blocking size parameters may be evaluated:

  • either by a heuristic based on cache sizes;
  • or using fixed prescribed values (for testing purposes).
See also:
setCpuCacheSizes

Definition at line 300 of file GeneralBlockPanelKernel.h.

{
  if (!useSpecificBlockingSizes(k, m, n)) {
    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor>(k, m, n, num_threads);
  }

  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
  enum {
    kr = 8,
    mr = Traits::mr,
    nr = Traits::nr
  };
  if (k > kr) k -= k % kr;
  if (m > mr) m -= m % mr;
  if (n > nr) n -= n % nr;
}
template<typename LhsScalar , typename RhsScalar >
void Eigen::internal::computeProductBlockingSizes ( Index &  k,
Index &  m,
Index &  n,
Index  num_threads = 1 
) [inline]

Definition at line 318 of file GeneralBlockPanelKernel.h.

{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n, num_threads);
}
template<typename T , bool Align>
EIGEN_DEVICE_FUNC void Eigen::internal::conditional_aligned_delete ( T *  ptr,
size_t  size 
) [inline]

Deletes objects constructed with conditional_aligned_new. The size parameter tells on how many objects to call the destructor of T.

Definition at line 337 of file Memory.h.

{
  destruct_elements_of_array<T>(ptr, size);
  conditional_aligned_free<Align>(ptr);
}
template<typename T , bool Align>
EIGEN_DEVICE_FUNC void Eigen::internal::conditional_aligned_delete_auto ( T *  ptr,
size_t  size 
) [inline]

Definition at line 409 of file Memory.h.

{
  if(NumTraits<T>::RequireInitialization)
    destruct_elements_of_array<T>(ptr, size);
  conditional_aligned_free<Align>(ptr);
}
template<bool Align>
EIGEN_DEVICE_FUNC void Eigen::internal::conditional_aligned_free ( void *  ptr) [inline]

Frees memory allocated with conditional_aligned_malloc

Definition at line 228 of file Memory.h.

{
  aligned_free(ptr);
}
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::conditional_aligned_free< false > ( void *  ptr) [inline]

Definition at line 233 of file Memory.h.

{
  std::free(ptr);
}
template<bool Align>
EIGEN_DEVICE_FUNC void* Eigen::internal::conditional_aligned_malloc ( size_t  size) [inline]

Allocates size bytes. If Align is true, then the returned ptr is 16-byte-aligned. On allocation error, the returned pointer is null, and a std::bad_alloc is thrown.

Definition at line 212 of file Memory.h.

{
  return aligned_malloc(size);
}
template<>
EIGEN_DEVICE_FUNC void* Eigen::internal::conditional_aligned_malloc< false > ( size_t  size) [inline]

Definition at line 217 of file Memory.h.

{
  check_that_malloc_is_allowed();

  void *result = std::malloc(size);
  if(!result && size)
    throw_std_bad_alloc();
  return result;
}
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T* Eigen::internal::conditional_aligned_new ( size_t  size) [inline]

Definition at line 310 of file Memory.h.

{
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
  EIGEN_TRY
  {
    return construct_elements_of_array(result, size);
  }
  EIGEN_CATCH(...)
  {
    conditional_aligned_free<Align>(result);
    EIGEN_THROW;
  }
}
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T* Eigen::internal::conditional_aligned_new_auto ( size_t  size) [inline]

Definition at line 366 of file Memory.h.

{
  if(size==0)
    return 0; // short-cut. Also fixes Bug 884
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
  if(NumTraits<T>::RequireInitialization)
  {
    EIGEN_TRY
    {
      construct_elements_of_array(result, size);
    }
    EIGEN_CATCH(...)
    {
      conditional_aligned_free<Align>(result);
      EIGEN_THROW;
    }
  }
  return result;
}
template<bool Align>
void* Eigen::internal::conditional_aligned_realloc ( void *  ptr,
size_t  new_size,
size_t  old_size 
) [inline]

Definition at line 238 of file Memory.h.

{
  return aligned_realloc(ptr, new_size, old_size);
}
template<>
void* Eigen::internal::conditional_aligned_realloc< false > ( void *  ptr,
size_t  new_size,
size_t   
) [inline]

Definition at line 243 of file Memory.h.

{
  return std::realloc(ptr, new_size);
}
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T* Eigen::internal::conditional_aligned_realloc_new ( T *  pts,
size_t  new_size,
size_t  old_size 
) [inline]

Definition at line 343 of file Memory.h.

{
  check_size_for_overflow<T>(new_size);
  check_size_for_overflow<T>(old_size);
  if(new_size < old_size)
    destruct_elements_of_array(pts+new_size, old_size-new_size);
  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
  if(new_size > old_size)
  {
    EIGEN_TRY
    {
      construct_elements_of_array(result+old_size, new_size-old_size);
    }
    EIGEN_CATCH(...)
    {
      conditional_aligned_free<Align>(result);
      EIGEN_THROW;
    }
  }
  return result;
}
template<typename T , bool Align>
T* Eigen::internal::conditional_aligned_realloc_new_auto ( T *  pts,
size_t  new_size,
size_t  old_size 
) [inline]

Definition at line 387 of file Memory.h.

{
  check_size_for_overflow<T>(new_size);
  check_size_for_overflow<T>(old_size);
  if(NumTraits<T>::RequireInitialization && (new_size < old_size))
    destruct_elements_of_array(pts+new_size, old_size-new_size);
  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
  if(NumTraits<T>::RequireInitialization && (new_size > old_size))
  {
    EIGEN_TRY
    {
      construct_elements_of_array(result+old_size, new_size-old_size);
    }
    EIGEN_CATCH(...)
    {
      conditional_aligned_free<Align>(result);
      EIGEN_THROW;
    }
  }
  return result;
}
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
EIGEN_DONT_INLINE void Eigen::internal::conjugate_gradient ( const MatrixType &  mat,
const Rhs &  rhs,
Dest &  x,
const Preconditioner &  precond,
Index &  iters,
typename Dest::RealScalar &  tol_error 
)

Low-level conjugate gradient algorithm

Parameters:
mat — The matrix A
rhs — The right hand side vector b
x — On input an initial solution, on output the computed solution.
precond — A preconditioner being able to efficiently solve for an approximation of Ax=b (regardless of b)
iters — On input the max number of iterations, on output the number of performed iterations.
tol_error — On input the tolerance error, on output an estimation of the relative error.

Definition at line 28 of file ConjugateGradient.h.

{
  using std::sqrt;
  using std::abs;
  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix<Scalar,Dynamic,1> VectorType;
  
  RealScalar tol = tol_error;
  Index maxIters = iters;
  
  Index n = mat.cols();

  VectorType residual = rhs - mat * x; //initial residual

  RealScalar rhsNorm2 = rhs.squaredNorm();
  if(rhsNorm2 == 0) 
  {
    x.setZero();
    iters = 0;
    tol_error = 0;
    return;
  }
  RealScalar threshold = tol*tol*rhsNorm2;
  RealScalar residualNorm2 = residual.squaredNorm();
  if (residualNorm2 < threshold)
  {
    iters = 0;
    tol_error = sqrt(residualNorm2 / rhsNorm2);
    return;
  }
  
  VectorType p(n);
  p = precond.solve(residual);      // initial search direction

  VectorType z(n), tmp(n);
  RealScalar absNew = numext::real(residual.dot(p));  // the square of the absolute value of r scaled by invM
  Index i = 0;
  while(i < maxIters)
  {
    tmp.noalias() = mat * p;                    // the bottleneck of the algorithm

    Scalar alpha = absNew / p.dot(tmp);         // the amount we travel on dir
    x += alpha * p;                             // update solution
    residual -= alpha * tmp;                    // update residual
    
    residualNorm2 = residual.squaredNorm();
    if(residualNorm2 < threshold)
      break;
    
    z = precond.solve(residual);                // approximately solve for "A z = residual"

    RealScalar absOld = absNew;
    absNew = numext::real(residual.dot(z));     // update the absolute value of r
    RealScalar beta = absNew / absOld;          // calculate the Gram-Schmidt value used to create the new search direction
    p = z + beta * p;                           // update search direction
    i++;
  }
  tol_error = sqrt(residualNorm2 / rhsNorm2);
  iters = i;
}
template<typename Lhs , typename Rhs , typename ResultType >
static void Eigen::internal::conservative_sparse_sparse_product_impl ( const Lhs &  lhs,
const Rhs &  rhs,
ResultType &  res,
bool  sortedInsertion = false 
) [static]

Definition at line 18 of file ConservativeSparseSparseProduct.h.

{
  typedef typename remove_all<Lhs>::type::Scalar Scalar;

  // make sure to call innerSize/outerSize since we fake the storage order.
  Index rows = lhs.innerSize();
  Index cols = rhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());
  
  ei_declare_aligned_stack_constructed_variable(bool,   mask,     rows, 0);
  ei_declare_aligned_stack_constructed_variable(Scalar, values,   rows, 0);
  ei_declare_aligned_stack_constructed_variable(Index,  indices,  rows, 0);
  
  std::memset(mask,0,sizeof(bool)*rows);

  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);
  
  // estimate the number of non zero entries
  // given a rhs column containing Y non zeros, we assume that the respective Y columns
  // of the lhs differs in average of one non zeros, thus the number of non zeros for
  // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
  // per column of the lhs.
  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
  Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();

  res.setZero();
  res.reserve(Index(estimated_nnz_prod));
  // we compute each column of the result, one after the other
  for (Index j=0; j<cols; ++j)
  {

    res.startVec(j);
    Index nnz = 0;
    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
    {
      Scalar y = rhsIt.value();
      Index k = rhsIt.index();
      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
      {
        Index i = lhsIt.index();
        Scalar x = lhsIt.value();
        if(!mask[i])
        {
          mask[i] = true;
          values[i] = x * y;
          indices[nnz] = i;
          ++nnz;
        }
        else
          values[i] += x * y;
      }
    }
    if(!sortedInsertion)
    {
      // unordered insertion
      for(Index k=0; k<nnz; ++k)
      {
        Index i = indices[k];
        res.insertBackByOuterInnerUnordered(j,i) = values[i];
        mask[i] = false;
      }
    }
    else
    {
      // alternative ordered insertion code:
      const Index t200 = rows/11; // 11 == (log2(200)*1.39)
      const Index t = (rows*100)/139;

      // FIXME reserve nnz non zeros
      // FIXME implement faster sorting algorithms for very small nnz
      // if the result is sparse enough => use a quick sort
      // otherwise => loop through the entire vector
      // In order to avoid to perform an expensive log2 when the
      // result is clearly very sparse we use a linear bound up to 200.
      if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)
      {
        if(nnz>1) std::sort(indices,indices+nnz);
        for(Index k=0; k<nnz; ++k)
        {
          Index i = indices[k];
          res.insertBackByOuterInner(j,i) = values[i];
          mask[i] = false;
        }
      }
      else
      {
        // dense path
        for(Index i=0; i<rows; ++i)
        {
          if(mask[i])
          {
            mask[i] = false;
            res.insertBackByOuterInner(j,i) = values[i];
          }
        }
      }
    }
  }
  res.finalize();
}
template<typename T >
EIGEN_DEVICE_FUNC T* Eigen::internal::const_cast_ptr ( const T *  ptr) [inline]

Definition at line 421 of file XprHelper.h.

{
  return const_cast<T*>(ptr);
}
template<typename T >
EIGEN_DEVICE_FUNC T* Eigen::internal::construct_elements_of_array ( T *  ptr,
size_t  size 
) [inline]

Constructs the elements of an array. The size parameter tells on how many objects to call the constructor of T.

Definition at line 265 of file Memory.h.

{
  size_t i;
  EIGEN_TRY
  {
      for (i = 0; i < size; ++i) ::new (ptr + i) T;
      return ptr;
  }
  EIGEN_CATCH(...)
  {
    destruct_elements_of_array(ptr, i);
    EIGEN_THROW;
  }
}
template<typename IndexDest , typename IndexSrc >
EIGEN_DEVICE_FUNC IndexDest Eigen::internal::convert_index ( const IndexSrc &  idx) [inline]

Definition at line 41 of file XprHelper.h.

                                                    {
  // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
  eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value to big for target type");
  return IndexDest(idx);
}
bool Eigen::internal::copy_bool ( bool  b) [inline]

Definition at line 490 of file Macros.h.

{ return b; }
template<typename StorageIndex >
StorageIndex Eigen::internal::cs_tdfs ( StorageIndex  j,
StorageIndex  k,
StorageIndex *  head,
const StorageIndex *  next,
StorageIndex *  post,
StorageIndex *  stack 
)

Definition at line 60 of file Amd.h.

{
  StorageIndex i, p, top = 0;
  if(!head || !next || !post || !stack) return (-1);    /* check inputs */
  stack[0] = j;                 /* place j on the stack */
  while (top >= 0)                /* while (stack is not empty) */
  {
    p = stack[top];           /* p = top of stack */
    i = head[p];              /* i = youngest child of p */
    if(i == -1)
    {
      top--;                 /* p has no unordered children left */
      post[k++] = p;        /* node p is the kth postordered node */
    }
    else
    {
      head[p] = next[i];   /* remove i from children of p */
      stack[++top] = i;     /* start dfs on child node i */
    }
  }
  return k;
}
template<typename StorageIndex >
static StorageIndex Eigen::internal::cs_wclear ( StorageIndex  mark,
StorageIndex  lemax,
StorageIndex *  w,
StorageIndex  n 
) [static]

Definition at line 45 of file Amd.h.

{
  StorageIndex k;
  if(mark < 2 || (mark + lemax < 0))
  {
    for(k = 0; k < n; k++)
      if(w[k] != 0)
        w[k] = 1;
    mark = 2;
  }
  return (mark);     /* at this point, w[0..n-1] < mark holds */
}
template<typename T >
EIGEN_DEVICE_FUNC void Eigen::internal::destruct_elements_of_array ( T *  ptr,
size_t  size 
) [inline]

Destructs the elements of an array. The size parameter tells on how many objects to call the destructor of T.

Definition at line 255 of file Memory.h.

{
  // always destruct an array starting from the end.
  if(ptr)
    while(size) ptr[--size].~T();
}
template<typename IndexType >
static void Eigen::internal::detect_super_cols ( colamd_col< IndexType >  Col[],
IndexType  A[],
IndexType  head[],
IndexType  row_start,
IndexType  row_length 
) [static]

Definition at line 1549 of file Ordering.h.

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( squaredNorm  ,
Size *NumTraits< Scalar >::MulCost+(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( norm  ,
(Size+5)*NumTraits< Scalar >::MulCost+(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( stableNorm  ,
(Size+5)*NumTraits< Scalar >::MulCost+(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( blueNorm  ,
(Size+5)*NumTraits< Scalar >::MulCost+(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( hypotNorm  ,
(Size-1)*functor_traits< scalar_hypot_op< Scalar > >::Cost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( sum  ,
(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( mean  ,
(Size-1)*NumTraits< Scalar >::AddCost+NumTraits< Scalar >::MulCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( minCoeff  ,
(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( maxCoeff  ,
(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( all  ,
(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( any  ,
(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( count  ,
(Size-1)*NumTraits< Scalar >::AddCost   
)
Eigen::internal::EIGEN_MEMBER_FUNCTOR ( prod  ,
(Size-1)*NumTraits< Scalar >::MulCost   
)
void Eigen::internal::eigen_pastix ( pastix_data_t **  pastix_data,
int  pastix_comm,
int  n,
int *  ptr,
int *  idx,
float *  vals,
int *  perm,
int *  invp,
float *  x,
int  nbrhs,
int *  iparm,
double *  dparm 
)

Definition at line 67 of file PaStiXSupport.h.

  {
    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
    if (nbrhs == 0) {x = NULL; nbrhs=1;}
    s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); 
  }
void Eigen::internal::eigen_pastix ( pastix_data_t **  pastix_data,
int  pastix_comm,
int  n,
int *  ptr,
int *  idx,
double *  vals,
int *  perm,
int *  invp,
double *  x,
int  nbrhs,
int *  iparm,
double *  dparm 
)

Definition at line 74 of file PaStiXSupport.h.

  {
    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
    if (nbrhs == 0) {x = NULL; nbrhs=1;}
    d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); 
  }
void Eigen::internal::eigen_pastix ( pastix_data_t **  pastix_data,
int  pastix_comm,
int  n,
int *  ptr,
int *  idx,
std::complex< float > *  vals,
int *  perm,
int *  invp,
std::complex< float > *  x,
int  nbrhs,
int *  iparm,
double *  dparm 
)

Definition at line 81 of file PaStiXSupport.h.

  {
    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
    if (nbrhs == 0) {x = NULL; nbrhs=1;}
    c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm); 
  }
void Eigen::internal::eigen_pastix ( pastix_data_t **  pastix_data,
int  pastix_comm,
int  n,
int *  ptr,
int *  idx,
std::complex< double > *  vals,
int *  perm,
int *  invp,
std::complex< double > *  x,
int  nbrhs,
int *  iparm,
double *  dparm 
)

Definition at line 88 of file PaStiXSupport.h.

  {
    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
    if (nbrhs == 0) {x = NULL; nbrhs=1;}
    z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_DCOMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_DCOMPLEX*>(x), nbrhs, iparm, dparm); 
  }
template<typename Index , typename IndexVector >
Index Eigen::internal::etree_find ( Index  i,
IndexVector &  pp 
)

Find the root of the tree/set containing the vertex i: uses path halving.

Definition at line 40 of file SparseColEtree.h.

{
  Index p = pp(i); // Parent 
  Index gp = pp(p); // Grand parent 
  while (gp != p) 
  {
    pp(i) = gp; // Parent pointer on find path is changed to former grand parent
    i = gp; 
    p = pp(i);
    gp = pp(p);
  }
  return p; 
}
template<typename LhsScalar , typename RhsScalar , int KcFactor>
void Eigen::internal::evaluateProductBlockingSizesHeuristic ( Index &  k,
Index &  m,
Index &  n,
Index  num_threads = 1 
)

Definition at line 93 of file GeneralBlockPanelKernel.h.

{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  // Explanations:
  // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and
  // kc x nc blocks B' on the rhs. B' has to fit into L2/L3 cache. Moreover, A' is processed
  // per mr x kc horizontal small panels where mr is the blocking size along the m dimension
  // at the register level. This small horizontal panel has to stay within L1 cache.
  std::ptrdiff_t l1, l2, l3;
  manage_caching_sizes(GetAction, &l1, &l2, &l3);

  if (num_threads > 1) {
    typedef typename Traits::ResScalar ResScalar;
    enum {
      kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
      k_mask = -8,

      mr = Traits::mr,
      mr_mask = -mr,

      nr = Traits::nr,
      nr_mask = -nr
    };
    // Increasing k gives us more time to prefetch the content of the "C"
    // registers. However once the latency is hidden there is no point in
    // increasing the value of k, so we'll cap it at 320 (value determined
    // experimentally).
    const Index k_cache = (std::min<Index>)((l1-ksub)/kdiv, 320);
    if (k_cache < k) {
      k = k_cache & k_mask;
      eigen_internal_assert(k > 0);
    }

    const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
    const Index n_per_thread = numext::div_ceil(n, num_threads);
    if (n_cache <= n_per_thread) {
      // Don't exceed the capacity of the l2 cache.
      eigen_internal_assert(n_cache >= static_cast<Index>(nr));
      n = n_cache & nr_mask;
      eigen_internal_assert(n > 0);
    } else {
      n = (std::min<Index>)(n, (n_per_thread + nr - 1) & nr_mask);
    }

    if (l3 > l2) {
      // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
      const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
      const Index m_per_thread = numext::div_ceil(m, num_threads);
      if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
        m = m_cache & mr_mask;
        eigen_internal_assert(m > 0);
      } else {
        m = (std::min<Index>)(m, (m_per_thread + mr - 1) & mr_mask);
      }
    }
  }
  else {
    // In unit tests we do not want to use extra large matrices,
    // so we reduce the cache size to check the blocking strategy is not flawed
#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    l1 = 9*1024;
    l2 = 32*1024;
    l3 = 512*1024;
#endif
    
    // Early return for small problems because the computation below are time consuming for small problems.
    // Perhaps it would make more sense to consider k*n*m??
    // Note that for very tiny problem, this function should be bypassed anyway
    // because we use the coefficient-based implementation for them.
    if((std::max)(k,(std::max)(m,n))<48)
      return;
    
    typedef typename Traits::ResScalar ResScalar;
    enum {
      k_peeling = 8,
      k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
      k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
    };
    
    // ---- 1st level of blocking on L1, yields kc ----
    
    // Blocking on the third dimension (i.e., k) is chosen so that an horizontal panel
    // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fits within L1 cache.
    // We also include a register-level block of the result (mx x nr).
    // (In an ideal world only the lhs panel would stay in L1)
    // Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of:
    const Index max_kc = std::max<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
    const Index old_k = k;
    if(k>max_kc)
    {
      // We are really blocking on the third dimension:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the result.
      k = (k%max_kc)==0 ? max_kc
                        : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));
                        
      eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
    }
    
    // ---- 2nd level of blocking on max(L2,L3), yields nc ----
    
    // TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is:
    //      actual_l2 = max(l2, l3/nb_core_sharing_l3)
    // The number below is quite conservative: it is better to underestimate the cache size rather than overestimating it)
    // For instance, it corresponds to 6MB of L3 shared among 4 cores.
    #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
    const Index actual_l2 = l3;
    #else
    const Index actual_l2 = 1572864; // == 1.5 MB
    #endif
    
    // Here, nc is chosen such that a block of kc x nc of the rhs fit within half of L2.
    // The second half is implicitly reserved to access the result and lhs coefficients.
    // When k<max_kc, then nc can arbitrarily growth. In practice, it seems to be fruitful
    // to limit this growth: we bound nc to growth by a factor x1.5.
    // However, if the entire lhs block fit within L1, then we are not going to block on the rows at all,
    // and it becomes fruitful to keep the packed rhs blocks in L1 if there is enough remaining space.
    Index max_nc;
    const Index lhs_bytes = m * k * sizeof(LhsScalar);
    const Index remaining_l1 = l1- k_sub - lhs_bytes;
    if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
    {
      // L1 blocking
      max_nc = remaining_l1 / (k*sizeof(RhsScalar));
    }
    else
    {
      // L2 blocking
      max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
    }
    // WARNING Below, we assume that Traits::nr is a power of two.
    Index nc = std::min<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
    if(n>nc)
    {
      // We are really blocking over the columns:
      // -> reduce blocking size to make sure the last block is as large as possible
      //    while keeping the same number of sweeps over the packed lhs.
      //    Here we allow one more sweep if this gives us a perfect match, thus the commented "-1"
      n = (n%nc)==0 ? nc
                    : (nc - Traits::nr * ((nc/*-1*/-(n%nc))/(Traits::nr*(n/nc+1))));
    }
    else if(old_k==k)
    {
      // So far, no blocking at all, i.e., kc==k, and nc==n.
      // In this case, let's perform a blocking over the rows such that the packed lhs data is kept in cache L1/L2
      // TODO: part of this blocking strategy is now implemented within the kernel itself, so the L1-based heuristic here should be obsolete.
      Index problem_size = k*n*sizeof(LhsScalar);
      Index actual_lm = actual_l2;
      Index max_mc = m;
      if(problem_size<=1024)
      {
        // problem is small enough to keep in L1
        // Let's choose m such that lhs's block fit in 1/3 of L1
        actual_lm = l1;
      }
      else if(l3!=0 && problem_size<=32768)
      {
        // we have both L2 and L3, and problem is small enough to be kept in L2
        // Let's choose m such that lhs's block fit in 1/3 of L2
        actual_lm = l2;
        max_mc = (std::min<Index>)(576,max_mc);
      }
      Index mc = (std::min<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
      if (mc > Traits::mr) mc -= mc % Traits::mr;
      else if (mc==0) return;
      m = (m%mc)==0 ? mc
                    : (mc - Traits::mr * ((mc/*-1*/-(m%mc))/(Traits::mr*(m/mc+1))));
    }
  }
}
template<typename T >
const T::Scalar* Eigen::internal::extract_data ( const T &  m)

Definition at line 360 of file BlasUtil.h.

{
  return extract_data_selector<T>::run(m);
}
template<typename IndexType >
static IndexType Eigen::internal::find_ordering ( IndexType  n_row,
IndexType  n_col,
IndexType  Alen,
Colamd_Row< IndexType >  Row[],
colamd_col< IndexType >  Col[],
IndexType  A[],
IndexType  head[],
IndexType  n_col2,
IndexType  max_deg,
IndexType  pfree 
) [static]

Definition at line 937 of file Ordering.h.

template<int Alignment, typename Scalar , typename Index >
EIGEN_DEVICE_FUNC Index Eigen::internal::first_aligned ( const Scalar *  array,
Index  size 
) [inline]

Returns the index of the first element of the array that is well aligned with respect to the requested Alignment.

Template Parameters:
Alignment - requested alignment in Bytes.
Parameters:
array - the address of the start of the array
size - the size of the array
Note:
If no element of the array is well aligned or the requested alignment is not a multiple of a scalar, the size of the array is returned. For example with SSE, the requested alignment is typically 16-bytes. If packet size for the given scalar type is 1, then everything is considered well-aligned.
Otherwise, if the Alignment is larger than the scalar size, we rely on the assumptions that sizeof(Scalar) is a power of 2. On the other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for example with Scalar=double on certain 32-bit platforms, see bug #79.

There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.

See also:
first_default_aligned()

Definition at line 436 of file Memory.h.

{
  const Index ScalarSize = sizeof(Scalar);
  const Index AlignmentSize = Alignment / ScalarSize;
  const Index AlignmentMask = AlignmentSize-1;

  if(AlignmentSize<=1)
  {
    // Either the requested alignment if smaller than a scalar, or it exactly match a 1 scalar
    // so that all elements of the array have the same alignment.
    return 0;
  }
  else if( (std::size_t(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)
  {
    // The array is not aligned to the size of a single scalar, or the requested alignment is not a multiple of the scalar size.
    // Consequently, no element of the array is well aligned.
    return size;
  }
  else
  {
    Index first = (AlignmentSize - (Index((std::size_t(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask;
    return (first < size) ? first : size;
  }
}
template<int Alignment, typename Derived >
static Index Eigen::internal::first_aligned ( const DenseBase< Derived > &  m) [inline, static]
Returns:
the index of the first element of the array stored by m that is properly aligned with respect to Alignment for vectorization.
Template Parameters:
Alignmentrequested alignment in Bytes.

There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more documentation.

Definition at line 639 of file DenseCoeffsBase.h.

{
  enum { ReturnZero = (int(evaluator<Derived>::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) };
  return first_aligned_impl<Alignment, Derived, ReturnZero>::run(m.derived());
}
template<typename Scalar , typename Index >
EIGEN_DEVICE_FUNC Index Eigen::internal::first_default_aligned ( const Scalar *  array,
Index  size 
) [inline]

Returns the index of the first element of the array that is well aligned with respect to the largest packet requirement.

See also:
first_aligned(Scalar*,Index) and first_default_aligned(DenseBase<Derived>)

Definition at line 464 of file Memory.h.

{
  typedef typename packet_traits<Scalar>::type DefaultPacketType;
  return first_aligned<unpacket_traits<DefaultPacketType>::alignment>(array, size);
}
template<typename Derived >
static Index Eigen::internal::first_default_aligned ( const DenseBase< Derived > &  m) [inline, static]

Definition at line 646 of file DenseCoeffsBase.h.

{
  typedef typename Derived::Scalar Scalar;
  typedef typename packet_traits<Scalar>::type DefaultPacketType;
  return internal::first_aligned<int(unpacket_traits<DefaultPacketType>::alignment),Derived>(m);
}
template<typename Index >
Index Eigen::internal::first_multiple ( Index  size,
Index  base 
) [inline]

Returns the smallest integer multiple of base and greater or equal to size

Definition at line 473 of file Memory.h.

{
  return ((size+base-1)/base)*base;
}
template<typename MatrixType >
void Eigen::internal::fortran_to_c_numbering ( MatrixType &  mat)

Definition at line 111 of file PaStiXSupport.h.

  {
    // Check the Numbering
    if ( mat.outerIndexPtr()[0] == 1 ) 
    { // Convert to C-style numbering
      int i;
      for(i = 0; i <= mat.rows(); ++i)
        --mat.outerIndexPtr()[i];
      for(i = 0; i < mat.nonZeros(); ++i)
        --mat.innerIndexPtr()[i];
    }
  }
template<typename IndexType >
static IndexType Eigen::internal::garbage_collection ( IndexType  n_row,
IndexType  n_col,
Colamd_Row< IndexType >  Row[],
colamd_col< IndexType >  Col[],
IndexType  A[],
IndexType *  pfree 
) [static]

Definition at line 1700 of file Ordering.h.

template<typename CJ , typename A , typename B , typename C , typename T >
EIGEN_STRONG_INLINE void Eigen::internal::gebp_madd ( const CJ &  cj,
A &  a,
B &  b,
C &  c,
T t 
)

Definition at line 344 of file GeneralBlockPanelKernel.h.

  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }
template<typename Derived >
EIGEN_DEVICE_FUNC const Derived::Scalar Eigen::internal::general_det3_helper ( const MatrixBase< Derived > &  matrix,
int  i1,
int  i2,
int  i3,
int  j1,
int  j2,
int  j3 
) [inline]

Definition at line 205 of file InverseImpl.h.

{
  return matrix.coeff(i1,j1)
         * (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2));
}
void Eigen::internal::handmade_aligned_free ( void *  ptr) [inline]

Frees memory allocated with handmade_aligned_malloc

Definition at line 96 of file Memory.h.

{
  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
}
void* Eigen::internal::handmade_aligned_malloc ( std::size_t  size) [inline]

Like malloc, but the returned pointer is guaranteed to be 16-byte aligned. Fast, but wastes 16 additional bytes of memory. Does not throw any exception.

Definition at line 86 of file Memory.h.

{
  void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES);
  if (original == 0) return 0;
  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
  *(reinterpret_cast<void**>(aligned) - 1) = original;
  return aligned;
}
void* Eigen::internal::handmade_aligned_realloc ( void *  ptr,
std::size_t  size,
std::size_t  = 0 
) [inline]

Reallocates aligned memory. Since we know that our handmade version is based on std::malloc we can use std::realloc to implement efficient reallocation.

Definition at line 106 of file Memory.h.

{
  if (ptr == 0) return handmade_aligned_malloc(size);
  void *original = *(reinterpret_cast<void**>(ptr) - 1);
  std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
  original = std::realloc(original,size+EIGEN_DEFAULT_ALIGN_BYTES);
  if (original == 0) return 0;
  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
  void *previous_aligned = static_cast<char *>(original)+previous_offset;
  if(aligned!=previous_aligned)
    std::memmove(aligned, previous_aligned, size);
  
  *(reinterpret_cast<void**>(aligned) - 1) = original;
  return aligned;
}
template<typename MatrixQR , typename HCoeffs >
void Eigen::internal::householder_qr_inplace_unblocked ( MatrixQR &  mat,
HCoeffs &  hCoeffs,
typename MatrixQR::Scalar *  tempData = 0 
)

Definition at line 234 of file HouseholderQR.h.

{
  typedef typename MatrixQR::Scalar Scalar;
  typedef typename MatrixQR::RealScalar RealScalar;
  Index rows = mat.rows();
  Index cols = mat.cols();
  Index size = (std::min)(rows,cols);

  eigen_assert(hCoeffs.size() == size);

  typedef Matrix<Scalar,MatrixQR::ColsAtCompileTime,1> TempType;
  TempType tempVector;
  if(tempData==0)
  {
    tempVector.resize(cols);
    tempData = tempVector.data();
  }

  for(Index k = 0; k < size; ++k)
  {
    Index remainingRows = rows - k;
    Index remainingCols = cols - k - 1;

    RealScalar beta;
    mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta);
    mat.coeffRef(k,k) = beta;

    // apply H to remaining part of m_qr from the left
    mat.bottomRightCorner(remainingRows, remainingCols)
        .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1);
  }
}
template<typename T >
EIGEN_DEVICE_FUNC void Eigen::internal::ignore_unused_variable ( const T )

Definition at line 559 of file Macros.h.

{}
template<typename IndexType >
static IndexType Eigen::internal::init_rows_cols ( IndexType  n_row,
IndexType  n_col,
Colamd_Row< IndexType >  Row[],
colamd_col< IndexType >  col[],
IndexType  A[],
IndexType  p[],
IndexType  stats[COLAMD_STATS] 
) [static]

Definition at line 484 of file Ordering.h.

template<typename IndexType >
static void Eigen::internal::init_scoring ( IndexType  n_row,
IndexType  n_col,
Colamd_Row< IndexType >  Row[],
colamd_col< IndexType >  Col[],
IndexType  A[],
IndexType  head[],
double  knobs[COLAMD_KNOBS],
IndexType *  p_n_row2,
IndexType *  p_n_col2,
IndexType *  p_max_deg 
) [static]

Definition at line 700 of file Ordering.h.

template<typename T1 , typename T2 >
bool Eigen::internal::is_same_dense ( const T1 &  mat1,
const T2 &  mat2,
typename enable_if< has_direct_access< T1 >::ret &&has_direct_access< T2 >::ret, T1 >::type = 0 
)

Definition at line 648 of file XprHelper.h.

{
  return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
}
template<typename T1 , typename T2 >
bool Eigen::internal::is_same_dense ( const T1 &  ,
const T2 &  ,
typename enable_if<!(has_direct_access< T1 >::ret &&has_direct_access< T2 >::ret), T1 >::type = 0 
)

Definition at line 654 of file XprHelper.h.

{
  return false;
}
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
EIGEN_DONT_INLINE void Eigen::internal::least_square_conjugate_gradient ( const MatrixType &  mat,
const Rhs &  rhs,
Dest &  x,
const Preconditioner &  precond,
Index &  iters,
typename Dest::RealScalar &  tol_error 
)

Low-level conjugate gradient algorithm for least-square problems

Parameters:
mat - The matrix A
rhs - The right hand side vector b
x - On input an initial solution, on output the computed solution.
precond - A preconditioner being able to efficiently solve for an approximation of A'Ax=b (regardless of b)
iters - On input the max number of iterations, on output the number of performed iterations.
tol_error - On input the tolerance error, on output an estimation of the relative error.

Definition at line 28 of file LeastSquareConjugateGradient.h.

{
  using std::sqrt;
  using std::abs;
  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix<Scalar,Dynamic,1> VectorType;
  
  RealScalar tol = tol_error;
  Index maxIters = iters;
  
  Index m = mat.rows(), n = mat.cols();

  VectorType residual        = rhs - mat * x;
  VectorType normal_residual = mat.adjoint() * residual;

  RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm();
  if(rhsNorm2 == 0) 
  {
    x.setZero();
    iters = 0;
    tol_error = 0;
    return;
  }
  RealScalar threshold = tol*tol*rhsNorm2;
  RealScalar residualNorm2 = normal_residual.squaredNorm();
  if (residualNorm2 < threshold)
  {
    iters = 0;
    tol_error = sqrt(residualNorm2 / rhsNorm2);
    return;
  }
  
  VectorType p(n);
  p = precond.solve(normal_residual);                         // initial search direction

  VectorType z(n), tmp(m);
  RealScalar absNew = numext::real(normal_residual.dot(p));  // the square of the absolute value of r scaled by invM
  Index i = 0;
  while(i < maxIters)
  {
    tmp.noalias() = mat * p;

    Scalar alpha = absNew / tmp.squaredNorm();      // the amount we travel on dir
    x += alpha * p;                                 // update solution
    residual -= alpha * tmp;                        // update residual
    normal_residual = mat.adjoint() * residual;     // update residual of the normal equation
    
    residualNorm2 = normal_residual.squaredNorm();
    if(residualNorm2 < threshold)
      break;
    
    z = precond.solve(normal_residual);             // approximately solve for "A'A z = normal_residual"

    RealScalar absOld = absNew;
    absNew = numext::real(normal_residual.dot(z));  // update the absolute value of r
    RealScalar beta = absNew / absOld;              // calculate the Gram-Schmidt value used to create the new search direction
    p = z + beta * p;                               // update search direction
    i++;
  }
  tol_error = sqrt(residualNorm2 / rhsNorm2);
  iters = i;
}
template<typename MatrixType , typename VectorType >
static Index Eigen::internal::llt_rank_update_lower ( MatrixType &  mat,
const VectorType &  vec,
const typename MatrixType::RealScalar &  sigma 
) [static]

Definition at line 195 of file LLT.h.

{
  using std::sqrt;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef typename MatrixType::ColXpr ColXpr;
  typedef typename internal::remove_all<ColXpr>::type ColXprCleaned;
  typedef typename ColXprCleaned::SegmentReturnType ColXprSegment;
  typedef Matrix<Scalar,Dynamic,1> TempVectorType;
  typedef typename TempVectorType::SegmentReturnType TempVecSegment;

  Index n = mat.cols();
  eigen_assert(mat.rows()==n && vec.size()==n);

  TempVectorType temp;

  if(sigma>0)
  {
    // This version is based on Givens rotations.
    // It is faster than the other one below, but only works for updates,
    // i.e., for sigma > 0
    temp = sqrt(sigma) * vec;

    for(Index i=0; i<n; ++i)
    {
      JacobiRotation<Scalar> g;
      g.makeGivens(mat(i,i), -temp(i), &mat(i,i));

      Index rs = n-i-1;
      if(rs>0)
      {
        ColXprSegment x(mat.col(i).tail(rs));
        TempVecSegment y(temp.tail(rs));
        apply_rotation_in_the_plane(x, y, g);
      }
    }
  }
  else
  {
    temp = vec;
    RealScalar beta = 1;
    for(Index j=0; j<n; ++j)
    {
      RealScalar Ljj = numext::real(mat.coeff(j,j));
      RealScalar dj = numext::abs2(Ljj);
      Scalar wj = temp.coeff(j);
      RealScalar swj2 = sigma*numext::abs2(wj);
      RealScalar gamma = dj*beta + swj2;

      RealScalar x = dj + swj2/beta;
      if (x<=RealScalar(0))
        return j;
      RealScalar nLjj = sqrt(x);
      mat.coeffRef(j,j) = nLjj;
      beta += swj2/dj;

      // Update the terms of L
      Index rs = n-j-1;
      if(rs)
      {
        temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs);
        if(gamma != 0)
          mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs);
      }
    }
  }
  return -1;
}
Index Eigen::internal::LUnumTempV ( Index &  m,
Index &  w,
Index &  t,
Index &  b 
) [inline]

Definition at line 39 of file SparseLU_Memory.h.

{
  return (std::max)(m, (t+b)*w);
}
template<typename Scalar >
Index Eigen::internal::LUTempSpace ( Index &  m,
Index &  w 
) [inline]

Definition at line 45 of file SparseLU_Memory.h.

{
  return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar);
}
template<typename TriangularFactorType , typename VectorsType , typename CoeffsType >
void Eigen::internal::make_block_householder_triangular_factor ( TriangularFactorType &  triFactor,
const VectorsType &  vectors,
const CoeffsType &  hCoeffs 
)

Definition at line 51 of file BlockHouseholder.h.

{
  const Index nbVecs = vectors.cols();
  eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);

  for(Index i = nbVecs-1; i >=0 ; --i)
  {
    Index rs = vectors.rows() - i - 1;
    Index rt = nbVecs-i-1;

    if(rt>0)
    {
      triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint()
                                                        * vectors.bottomRightCorner(rs, rt).template triangularView<UnitLower>();
            
      // FIXME add .noalias() once the triangular product can work inplace
      triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>();
      
    }
    triFactor(i,i) = hCoeffs(i);
  }
}
void Eigen::internal::manage_caching_sizes ( Action  action,
std::ptrdiff_t *  l1,
std::ptrdiff_t *  l2,
std::ptrdiff_t *  l3 
) [inline]

Definition at line 55 of file GeneralBlockPanelKernel.h.

{
  static CacheSizes m_cacheSizes;

  if(action==SetAction)
  {
    // set the cpu cache size and cache all block sizes from a global cache size in byte
    eigen_internal_assert(l1!=0 && l2!=0);
    m_cacheSizes.m_l1 = *l1;
    m_cacheSizes.m_l2 = *l2;
    m_cacheSizes.m_l3 = *l3;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(l1!=0 && l2!=0);
    *l1 = m_cacheSizes.m_l1;
    *l2 = m_cacheSizes.m_l2;
    *l3 = m_cacheSizes.m_l3;
  }
  else
  {
    eigen_internal_assert(false);
  }
}
std::ptrdiff_t Eigen::internal::manage_caching_sizes_helper ( std::ptrdiff_t  a,
std::ptrdiff_t  b 
) [inline]
Returns:
b if a<=0, and returns a otherwise.

Definition at line 23 of file GeneralBlockPanelKernel.h.

{
  return a<=0 ? b : a;
}
void Eigen::internal::manage_multi_threading ( Action  action,
int *  v 
) [inline]

Definition at line 18 of file Parallelizer.h.

{
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}
template<typename Scalar , int Flags, typename Index >
MappedSparseMatrix<Scalar,Flags,Index> Eigen::internal::map_superlu ( SluMatrix &  sluMat)

View a SuperLU matrix as an Eigen expression

Definition at line 272 of file SuperLUSupport.h.

{
  eigen_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR
         || (Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC);

  Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;

  return MappedSparseMatrix<Scalar,Flags,Index>(
    sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
    sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
}
template<typename Scalar , typename StorageIndex >
void Eigen::internal::minimum_degree_ordering ( SparseMatrix< Scalar, ColMajor, StorageIndex > &  C,
PermutationMatrix< Dynamic, Dynamic, StorageIndex > &  perm 
)

Approximate minimum degree ordering algorithm.

Parameters:
[in]Cthe input selfadjoint matrix stored in compressed column major format.
[out]permthe permutation P reducing the fill-in of the input matrix C

Note that the input matrix C must be complete, that is both the upper and lower parts have to be stored, as well as the diagonal entries. On exit the values of C are destroyed

Definition at line 94 of file Amd.h.

{
  /* NOTE(review): this is a quotient-graph approximate minimum degree (AMD)
     ordering in the CSparse style (the code calls internal::cs_wclear and
     internal::cs_tdfs); variable names follow that reference implementation. */
  using std::sqrt;
  
  StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
                k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
                ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;
  
  StorageIndex n = StorageIndex(C.cols());
  dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(double(n))));   /* find dense threshold */
  dense = (std::min)(n-2, dense);
  
  StorageIndex cnz = StorageIndex(C.nonZeros());
  perm.resize(n+1);
  t = cnz + cnz/5 + 2*n;                 /* add elbow room to C */
  C.resizeNonZeros(t);
  
  // get workspace: eight integer arrays of size n+1 carved out of one allocation
  ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0);
  StorageIndex* len     = W;
  StorageIndex* nv      = W +   (n+1);
  StorageIndex* next    = W + 2*(n+1);
  StorageIndex* head    = W + 3*(n+1);
  StorageIndex* elen    = W + 4*(n+1);
  StorageIndex* degree  = W + 5*(n+1);
  StorageIndex* w       = W + 6*(n+1);
  StorageIndex* hhead   = W + 7*(n+1);
  StorageIndex* last    = perm.indices().data();                              /* use P as workspace for last */
  
  /* --- Initialize quotient graph ---------------------------------------- */
  StorageIndex* Cp = C.outerIndexPtr();
  StorageIndex* Ci = C.innerIndexPtr();
  for(k = 0; k < n; k++)
    len[k] = Cp[k+1] - Cp[k];
  len[n] = 0;
  nzmax = t;
  
  for(i = 0; i <= n; i++)
  {
    head[i]   = -1;                     // degree list i is empty
    last[i]   = -1;
    next[i]   = -1;
    hhead[i]  = -1;                     // hash list i is empty 
    nv[i]     = 1;                      // node i is just one node
    w[i]      = 1;                      // node i is alive
    elen[i]   = 0;                      // Ek of node i is empty
    degree[i] = len[i];                 // degree of node i
  }
  mark = internal::cs_wclear<StorageIndex>(0, 0, w, n);         /* clear w */
  
  /* --- Initialize degree lists ------------------------------------------ */
  for(i = 0; i < n; i++)
  {
    bool has_diag = false;
    for(p = Cp[i]; p<Cp[i+1]; ++p)
      if(Ci[p]==i)
      {
        has_diag = true;
        break;
      }
   
    d = degree[i];
    if(d == 1 && has_diag)           /* node i is empty */
    {
      elen[i] = -2;                 /* element i is dead */
      nel++;
      Cp[i] = -1;                   /* i is a root of assembly tree */
      w[i] = 0;
    }
    else if(d > dense || !has_diag)  /* node i is dense or has no structural diagonal element */
    {
      nv[i] = 0;                    /* absorb i into element n */
      elen[i] = -1;                 /* node i is dead */
      nel++;
      Cp[i] = amd_flip (n);
      nv[n]++;
    }
    else
    {
      if(head[d] != -1) last[head[d]] = i;
      next[i] = head[d];           /* put node i in degree list d */
      head[d] = i;
    }
  }
  
  elen[n] = -2;                         /* n is a dead element */
  Cp[n] = -1;                           /* n is a root of assembly tree */
  w[n] = 0;                             /* n is a dead element */
  
  while (nel < n)                         /* while (selecting pivots) do */
  {
    /* --- Select node of minimum approximate degree -------------------- */
    for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}
    if(next[k] != -1) last[next[k]] = -1;
    head[mindeg] = next[k];          /* remove k from degree list */
    elenk = elen[k];                  /* elenk = |Ek| */
    nvk = nv[k];                      /* # of nodes k represents */
    nel += nvk;                        /* nv[k] nodes of A eliminated */
    
    /* --- Garbage collection ------------------------------------------- */
    if(elenk > 0 && cnz + mindeg >= nzmax)
    {
      for(j = 0; j < n; j++)
      {
        if((p = Cp[j]) >= 0)      /* j is a live node or element */
        {
          Cp[j] = Ci[p];          /* save first entry of object */
          Ci[p] = amd_flip (j);    /* first entry is now amd_flip(j) */
        }
      }
      for(q = 0, p = 0; p < cnz; ) /* scan all of memory */
      {
        if((j = amd_flip (Ci[p++])) >= 0)  /* found object j */
        {
          Ci[q] = Cp[j];       /* restore first entry of object */
          Cp[j] = q++;          /* new pointer to object j */
          for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];
        }
      }
      cnz = q;                       /* Ci[cnz...nzmax-1] now free */
    }
    
    /* --- Construct new element ---------------------------------------- */
    dk = 0;
    nv[k] = -nvk;                     /* flag k as in Lk */
    p = Cp[k];
    pk1 = (elenk == 0) ? p : cnz;      /* do in place if elen[k] == 0 */
    pk2 = pk1;
    for(k1 = 1; k1 <= elenk + 1; k1++)
    {
      if(k1 > elenk)
      {
        e = k;                     /* search the nodes in k */
        pj = p;                    /* list of nodes starts at Ci[pj]*/
        ln = len[k] - elenk;      /* length of list of nodes in k */
      }
      else
      {
        e = Ci[p++];              /* search the nodes in e */
        pj = Cp[e];
        ln = len[e];              /* length of list of nodes in e */
      }
      for(k2 = 1; k2 <= ln; k2++)
      {
        i = Ci[pj++];
        if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */
        dk += nvi;                 /* degree[Lk] += size of node i */
        nv[i] = -nvi;             /* negate nv[i] to denote i in Lk*/
        Ci[pk2++] = i;            /* place i in Lk */
        if(next[i] != -1) last[next[i]] = last[i];
        if(last[i] != -1)         /* remove i from degree list */
        {
          next[last[i]] = next[i];
        }
        else
        {
          head[degree[i]] = next[i];
        }
      }
      if(e != k)
      {
        Cp[e] = amd_flip (k);      /* absorb e into k */
        w[e] = 0;                 /* e is now a dead element */
      }
    }
    if(elenk != 0) cnz = pk2;         /* Ci[cnz...nzmax] is free */
    degree[k] = dk;                   /* external degree of k - |Lk\i| */
    Cp[k] = pk1;                      /* element k is in Ci[pk1..pk2-1] */
    len[k] = pk2 - pk1;
    elen[k] = -2;                     /* k is now an element */
    
    /* --- Find set differences ----------------------------------------- */
    mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n);  /* clear w if necessary */
    for(pk = pk1; pk < pk2; pk++)    /* scan 1: find |Le\Lk| */
    {
      i = Ci[pk];
      if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */
      nvi = -nv[i];                      /* nv[i] was negated */
      wnvi = mark - nvi;
      for(p = Cp[i]; p <= Cp[i] + eln - 1; p++)  /* scan Ei */
      {
        e = Ci[p];
        if(w[e] >= mark)
        {
          w[e] -= nvi;          /* decrement |Le\Lk| */
        }
        else if(w[e] != 0)        /* ensure e is a live element */
        {
          w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */
        }
      }
    }
    
    /* --- Degree update ------------------------------------------------ */
    for(pk = pk1; pk < pk2; pk++)    /* scan2: degree update */
    {
      i = Ci[pk];                   /* consider node i in Lk */
      p1 = Cp[i];
      p2 = p1 + elen[i] - 1;
      pn = p1;
      for(h = 0, d = 0, p = p1; p <= p2; p++)    /* scan Ei */
      {
        e = Ci[p];
        if(w[e] != 0)             /* e is an unabsorbed element */
        {
          dext = w[e] - mark;   /* dext = |Le\Lk| */
          if(dext > 0)
          {
            d += dext;         /* sum up the set differences */
            Ci[pn++] = e;     /* keep e in Ei */
            h += e;            /* compute the hash of node i */
          }
          else
          {
            Cp[e] = amd_flip (k);  /* aggressive absorb. e->k */
            w[e] = 0;             /* e is a dead element */
          }
        }
      }
      elen[i] = pn - p1 + 1;        /* elen[i] = |Ei| */
      p3 = pn;
      p4 = p1 + len[i];
      for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */
      {
        j = Ci[p];
        if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */
        d += nvj;                  /* degree(i) += |j| */
        Ci[pn++] = j;             /* place j in node list of i */
        h += j;                    /* compute hash for node i */
      }
      if(d == 0)                     /* check for mass elimination */
      {
        Cp[i] = amd_flip (k);      /* absorb i into k */
        nvi = -nv[i];
        dk -= nvi;                 /* |Lk| -= |i| */
        nvk += nvi;                /* |k| += nv[i] */
        nel += nvi;
        nv[i] = 0;
        elen[i] = -1;             /* node i is dead */
      }
      else
      {
        degree[i] = std::min<StorageIndex> (degree[i], d);   /* update degree(i) */
        Ci[pn] = Ci[p3];         /* move first node to end */
        Ci[p3] = Ci[p1];         /* move 1st el. to end of Ei */
        Ci[p1] = k;               /* add k as 1st element in of Ei */
        len[i] = pn - p1 + 1;     /* new len of adj. list of node i */
        h %= n;                    /* finalize hash of i */
        next[i] = hhead[h];      /* place i in hash bucket */
        hhead[h] = i;
        last[i] = h;      /* save hash of i in last[i] */
      }
    }                                   /* scan2 is done */
    degree[k] = dk;                   /* finalize |Lk| */
    lemax = std::max<StorageIndex>(lemax, dk);
    mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n);    /* clear w */
    
    /* --- Supernode detection ------------------------------------------ */
    for(pk = pk1; pk < pk2; pk++)
    {
      i = Ci[pk];
      if(nv[i] >= 0) continue;         /* skip if i is dead */
      h = last[i];                      /* scan hash bucket of node i */
      i = hhead[h];
      hhead[h] = -1;                    /* hash bucket will be empty */
      for(; i != -1 && next[i] != -1; i = next[i], mark++)
      {
        ln = len[i];
        eln = elen[i];
        for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;
        jlast = i;
        for(j = next[i]; j != -1; ) /* compare i with all j */
        {
          ok = (len[j] == ln) && (elen[j] == eln);
          for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)
          {
            if(w[Ci[p]] != mark) ok = 0;    /* compare i and j*/
          }
          if(ok)                     /* i and j are identical */
          {
            Cp[j] = amd_flip (i);  /* absorb j into i */
            nv[i] += nv[j];
            nv[j] = 0;
            elen[j] = -1;         /* node j is dead */
            j = next[j];          /* delete j from hash bucket */
            next[jlast] = j;
          }
          else
          {
            jlast = j;             /* j and i are different */
            j = next[j];
          }
        }
      }
    }
    
    /* --- Finalize new element------------------------------------------ */
    for(p = pk1, pk = pk1; pk < pk2; pk++)   /* finalize Lk */
    {
      i = Ci[pk];
      if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
      nv[i] = nvi;                      /* restore nv[i] */
      d = degree[i] + dk - nvi;         /* compute external degree(i) */
      d = std::min<StorageIndex> (d, n - nel - nvi);
      if(head[d] != -1) last[head[d]] = i;
      next[i] = head[d];               /* put i back in degree list */
      last[i] = -1;
      head[d] = i;
      mindeg = std::min<StorageIndex> (mindeg, d);       /* find new minimum degree */
      degree[i] = d;
      Ci[p++] = i;                      /* place i in Lk */
    }
    nv[k] = nvk;                      /* # nodes absorbed into k */
    if((len[k] = p-pk1) == 0)         /* length of adj list of element k*/
    {
      Cp[k] = -1;                   /* k is a root of the tree */
      w[k] = 0;                     /* k is now a dead element */
    }
    if(elenk != 0) cnz = p;           /* free unused space in Lk */
  }
  
  /* --- Postordering ----------------------------------------------------- */
  for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */
  for(j = 0; j <= n; j++) head[j] = -1;
  for(j = n; j >= 0; j--)              /* place unordered nodes in lists */
  {
    if(nv[j] > 0) continue;          /* skip if j is an element */
    next[j] = head[Cp[j]];          /* place j in list of its parent */
    head[Cp[j]] = j;
  }
  for(e = n; e >= 0; e--)              /* place elements in lists */
  {
    if(nv[e] <= 0) continue;         /* skip unless e is an element */
    if(Cp[e] != -1)
    {
      next[e] = head[Cp[e]];      /* place e in list of its parent */
      head[Cp[e]] = e;
    }
  }
  for(k = 0, i = 0; i <= n; i++)       /* postorder the assembly tree */
  {
    if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);
  }
  
  perm.indices().conservativeResize(n);
}
template<typename IndexVector >
void Eigen::internal::nr_etdfs ( typename IndexVector::Scalar  n,
IndexVector &  parent,
IndexVector &  first_kid,
IndexVector &  next_kid,
IndexVector &  post,
typename IndexVector::Scalar  postnum 
)

Depth-first search from vertex n. No recursion. This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.

Definition at line 130 of file SparseColEtree.h.

{
  // Iterative (stack-free) depth-first post-order numbering: follow first_kid
  // links downward, number nodes on the way back up via parent links, and
  // resume the traversal at next_kid.
  typedef typename IndexVector::Scalar StorageIndex;
  StorageIndex current = n, first, next;
  while (postnum != n) 
  {
    // Fetch the first kid of the current node
    first = first_kid(current);
    
    // no kid for the current node
    if (first == -1) 
    {
      // Numbering this node because it has no kid 
      post(current) = postnum++;
      
      // looking for the next kid 
      next = next_kid(current); 
      while (next == -1) 
      {
        // No more kids : back to the parent node
        current = parent(current); 
        // numbering the parent node 
        post(current) = postnum++;
        
        // Get the next kid 
        next = next_kid(current); 
      }
      // stopping criterion: all n+1 nodes have been numbered
      if (postnum == n+1) return; 
      
      // Updating current node 
      current = next; 
    }
    else 
    {
      current = first; 
    }
  }
}
std::ostream& Eigen::internal::operator<< ( std::ostream &  s,
const Packet16uc &  v 
) [inline]

Definition at line 159 of file AltiVec/PacketMath.h.

{
  // Reinterpret the 128-bit register as 16 bytes for printing.
  union {
    Packet16uc   v;
    unsigned char n[16];
  } lanes;
  lanes.v = v;
  for (int k = 0; k < 16; ++k)
    s << static_cast<int>(lanes.n[k]) << ", ";
  return s;
}
std::ostream& Eigen::internal::operator<< ( std::ostream &  s,
const Packet4f &  v 
) [inline]

Definition at line 171 of file AltiVec/PacketMath.h.

{
  // View the SIMD register as four scalar floats.
  union {
    Packet4f   v;
    float n[4];
  } lanes;
  lanes.v = v;
  for (int k = 0; k < 4; ++k)
  {
    if (k) s << ", ";
    s << lanes.n[k];
  }
  return s;
}
std::ostream& Eigen::internal::operator<< ( std::ostream &  s,
const Packet4i &  v 
) [inline]

Definition at line 182 of file AltiVec/PacketMath.h.

{
  // View the SIMD register as four scalar ints.
  union {
    Packet4i   v;
    int n[4];
  } lanes;
  lanes.v = v;
  for (int k = 0; k < 4; ++k)
  {
    if (k) s << ", ";
    s << lanes.n[k];
  }
  return s;
}
std::ostream& Eigen::internal::operator<< ( std::ostream &  s,
const Packet4ui &  v 
) [inline]

Definition at line 193 of file AltiVec/PacketMath.h.

{
  // View the SIMD register as four unsigned ints.
  union {
    Packet4ui   v;
    unsigned int n[4];
  } lanes;
  lanes.v = v;
  for (int k = 0; k < 4; ++k)
  {
    if (k) s << ", ";
    s << lanes.n[k];
  }
  return s;
}
template<typename IndexType >
static void Eigen::internal::order_children ( IndexType  n_col,
colamd_col< IndexType >  Col[],
IndexType  p[] 
) [inline, static]

Definition at line 1448 of file Ordering.h.

template<typename MatrixType >
void Eigen::internal::ordering_helper_at_plus_a ( const MatrixType &  A,
MatrixType &  symmat 
)
Parameters:
[in]Athe input non-symmetric matrix
[out]symmatthe symmetric pattern A^T+A from the input matrix A. FIXME: The values should not be considered here

Definition at line 27 of file Ordering.h.

{
  // Build C = A^T with every stored value zeroed, so that symmat = C + A has
  // the pattern of A^T + A while keeping only A's values (see the FIXME in
  // the function documentation: values should not be considered here).
  MatrixType C;
  C = A.transpose(); // NOTE: Could be  costly
  // NOTE(review): iterates the outer dimension using rows() with an int index;
  // assumes a square matrix (ordering input) — confirm for non-square callers.
  for (int i = 0; i < C.rows(); i++) 
  {
      for (typename MatrixType::InnerIterator it(C, i); it; ++it)
        it.valueRef() = 0.0;
  }
  symmat = C + A;
}
template<typename Dst , typename Lhs , typename Rhs , typename Func >
EIGEN_DONT_INLINE void Eigen::internal::outer_product_selector_run ( Dst &  dst,
const Lhs &  lhs,
const Rhs &  rhs,
const Func &  func,
const false_type &   
)

Definition at line 246 of file ProductEvaluators.h.

{
  // Column-wise outer-product update: for each column j,
  //   func(dst.col(j), rhs(0,j) * lhs)
  // where func encodes the assignment operation (=, +=, ...).
  evaluator<Rhs> rhsEval(rhs);
  // Evaluate lhs once up front since it is reused for every column.
  typename nested_eval<Lhs,Rhs::SizeAtCompileTime>::type actual_lhs(lhs);
  // FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored
  // FIXME not very good if rhs is real and lhs complex while alpha is real too
  const Index cols = dst.cols();
  for (Index j=0; j<cols; ++j)
    func(dst.col(j), rhsEval.coeff(0,j) * actual_lhs);
}
template<typename Dst , typename Lhs , typename Rhs , typename Func >
EIGEN_DONT_INLINE void Eigen::internal::outer_product_selector_run ( Dst &  dst,
const Lhs &  lhs,
const Rhs &  rhs,
const Func &  func,
const true_type &   
)

Definition at line 259 of file ProductEvaluators.h.

{
  // Row-wise outer-product update: for each row i,
  //   func(dst.row(i), lhs(i,0) * rhs)
  // where func encodes the assignment operation (=, +=, ...).
  evaluator<Lhs> lhsEval(lhs);
  // Evaluate rhs once up front since it is reused for every row.
  typename nested_eval<Rhs,Lhs::SizeAtCompileTime>::type actual_rhs(rhs);
  // FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored
  // FIXME not very good if lhs is real and rhs complex while alpha is real too
  const Index rows = dst.rows();
  for (Index i=0; i<rows; ++i)
    func(dst.row(i), lhsEval.coeff(i,0) * actual_rhs);
}
/// \returns the absolute value of \a a (generic scalar fallback).
/// Defined in GenericPacketMath.h (line 184).
template<typename Packet>
inline Packet pabs(const Packet& a)
{
  using std::abs;
  return abs(a);
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pabs ( const Packet8f &  a)

Definition at line 334 of file AVX/PacketMath.h.

{
  // AVX: abs via bit mask — clear the IEEE-754 sign bit of each of the 8 floats.
  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm256_and_ps(a,mask);
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pabs ( const Packet4d &  a)

Definition at line 339 of file AVX/PacketMath.h.

{
  // AVX: abs via bit mask — per 64-bit lane the low 32-bit word is all ones and
  // the high word is 0x7FFFFFFF (setr is little-endian), clearing each sign bit.
  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm256_and_pd(a,mask);
}
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pabs ( const Packet2d &  a)

Definition at line 466 of file SSE/PacketMath.h.

{
  // SSE2: abs via bit mask — clears the sign bit of each double
  // (setr is little-endian: low word all ones, high word 0x7FFFFFFF).
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pabs ( const Packet4f &  a)

Definition at line 508 of file AltiVec/PacketMath.h.

// AltiVec: element-wise absolute value of the 4 floats.
{ return vec_abs(a); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pabs ( const Packet4i &  a)

Definition at line 509 of file AltiVec/PacketMath.h.

// AltiVec: element-wise absolute value of the 4 ints.
{ return vec_abs(a); }
/// \returns the arc cosine of \a a (coeff-wise); generic scalar fallback.
/// Defined in GenericPacketMath.h (line 392).
template<typename Packet>
inline Packet pacos(const Packet& a)
{
  using std::acos;
  return acos(a);
}
/// \returns a + b (coeff-wise); generic scalar fallback.
/// Defined in GenericPacketMath.h (line 145).
template<typename Packet>
inline Packet padd(const Packet& a, const Packet& b)
{
  return a + b;
}
template<typename Packet >
DoublePacket<Packet> Eigen::internal::padd ( const DoublePacket< Packet > &  a,
const DoublePacket< Packet > &  b 
)

Definition at line 589 of file GeneralBlockPanelKernel.h.

{
  // Component-wise addition of the two sub-packets held by a DoublePacket.
  DoublePacket<Packet> res;
  res.first  = padd(a.first, b.first);
  res.second = padd(a.second,b.second);
  return res;
}
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::padd< Packet1cd > ( const Packet1cd &  a,
const Packet1cd &  b 
)

Definition at line 301 of file SSE/Complex.h.

// SSE2: lane-wise add of the two doubles; complex addition is element-wise.
{ return Packet1cd(_mm_add_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::padd< Packet2cd > ( const Packet2cd &  a,
const Packet2cd &  b 
)

Definition at line 272 of file AVX/Complex.h.

// AVX: lane-wise add of 4 doubles; complex addition is element-wise.
{ return Packet2cd(_mm256_add_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::padd< Packet2cf > ( const Packet2cf &  a,
const Packet2cf &  b 
)

Definition at line 86 of file AltiVec/Complex.h.

// AltiVec: lane-wise add of the underlying float vector; complex addition is element-wise.
{ return Packet2cf(vec_add(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::padd< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 194 of file SSE/PacketMath.h.

// SSE2: add 2 doubles.
{ return _mm_add_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::padd< Packet4cf > ( const Packet4cf &  a,
const Packet4cf &  b 
)

Definition at line 50 of file AVX/Complex.h.

// AVX: lane-wise add of 8 floats; complex addition is element-wise.
{ return Packet4cf(_mm256_add_ps(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::padd< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 125 of file AVX/PacketMath.h.

// AVX: add 4 doubles.
{ return _mm256_add_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::padd< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 300 of file AltiVec/PacketMath.h.

// AltiVec: add 4 floats.
{ return vec_add(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::padd< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 301 of file AltiVec/PacketMath.h.

// AltiVec: add 4 ints.
{ return vec_add(a,b); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::padd< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 124 of file AVX/PacketMath.h.

// AVX: add 8 floats.
{ return _mm256_add_ps(a,b); }
template<int Offset, typename PacketType >
void Eigen::internal::palign ( PacketType &  first,
const PacketType &  second 
) [inline]

update first using the concatenation of the packet_size minus Offset last elements of first and Offset first elements of second.

This function is currently only used to optimize matrix-vector products on unaligned matrices. It takes 2 packets that represent a contiguous memory array, and returns a packet starting at the position Offset. For instance, for packets of 4 elements, we have: Input:

  • first = {f0,f1,f2,f3}
  • second = {s0,s1,s2,s3} Output:
    • if Offset==0 then {f0,f1,f2,f3}
    • if Offset==1 then {f1,f2,f3,s0}
    • if Offset==2 then {f2,f3,s0,s1}
    • if Offset==3 then {f3,s0,s1,s2}

Definition at line 536 of file GenericPacketMath.h.

{
  // Dispatch to the architecture-specific implementation selected by
  // the palign_impl specialization for this Offset/PacketType pair.
  palign_impl<Offset,PacketType>::run(first,second);
}
/// \returns the bitwise AND of \a a and \a b (generic scalar fallback).
/// Defined in GenericPacketMath.h (line 192).
template<typename Packet>
inline Packet pand(const Packet& a, const Packet& b)
{
  return a & b;
}
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pand< Packet1cd > ( const Packet1cd &  a,
const Packet1cd &  b 
)

Definition at line 324 of file SSE/Complex.h.

// SSE2: bitwise AND of the two 128-bit registers.
{ return Packet1cd(_mm_and_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pand< Packet2cd > ( const Packet2cd &  a,
const Packet2cd &  b 
)

Definition at line 291 of file AVX/Complex.h.

// AVX: bitwise AND of the two 256-bit registers.
{ return Packet2cd(_mm256_and_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pand< Packet2cf > ( const Packet2cf &  a,
const Packet2cf &  b 
)

Definition at line 110 of file AltiVec/Complex.h.

// AltiVec: bitwise AND of the two vector registers.
{ return Packet2cf(vec_and(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pand< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 286 of file SSE/PacketMath.h.

// SSE2: bitwise AND of the two 128-bit registers.
{ return _mm_and_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pand< Packet4cf > ( const Packet4cf &  a,
const Packet4cf &  b 
)

Definition at line 70 of file AVX/Complex.h.

// AVX: bitwise AND of the two 256-bit registers.
{ return Packet4cf(_mm256_and_ps(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pand< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 196 of file AVX/PacketMath.h.

// AVX: bitwise AND of the two 256-bit registers.
{ return _mm256_and_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pand< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 382 of file AltiVec/PacketMath.h.

// AltiVec: bitwise AND of the two vector registers.
{ return vec_and(a, b); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pand< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 383 of file AltiVec/PacketMath.h.

// AltiVec: bitwise AND of the two vector registers.
{ return vec_and(a, b); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pand< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 195 of file AVX/PacketMath.h.

// AVX: bitwise AND of the two 256-bit registers.
{ return _mm256_and_ps(a,b); }
/// \returns the bitwise andnot of \a a and \a b, i.e. a & ~b
/// (generic scalar fallback; SIMD backends provide specializations).
/// Defined in GenericPacketMath.h (line 204).
///
/// Fix: the previous implementation returned `a & (!b)`, applying *logical*
/// negation (which yields 0 or 1) instead of the bitwise complement, and
/// therefore computed the wrong value for integer packets — e.g.
/// pandnot(12,10) gave 0 instead of 4. Later Eigen versions use ~b as well.
template<typename Packet>
inline Packet pandnot(const Packet& a, const Packet& b)
{
  return a & (~b);
}
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pandnot< Packet1cd > ( const Packet1cd &  a,
const Packet1cd &  b 
)

Definition at line 327 of file SSE/Complex.h.

// NOTE(review): _mm_andnot_pd(x,y) computes (~x) & y, so this returns (~a) & b,
// whereas the generic/AltiVec versions compute a & ~b — the operand order looks
// inverted relative to the documented contract; later Eigen swaps the arguments.
{ return Packet1cd(_mm_andnot_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pandnot< Packet2cd > ( const Packet2cd &  a,
const Packet2cd &  b 
)

Definition at line 294 of file AVX/Complex.h.

// NOTE(review): _mm256_andnot_pd(x,y) computes (~x) & y, so this returns (~a) & b,
// not the documented a & ~b (cf. the AltiVec version) — confirm; later Eigen
// swaps the arguments.
{ return Packet2cd(_mm256_andnot_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pandnot< Packet2cf > ( const Packet2cf &  a,
const Packet2cf &  b 
)

Definition at line 113 of file AltiVec/Complex.h.

// AltiVec: vec_nor(b,b) == ~b, so this computes a & ~b as documented.
{ return Packet2cf(vec_and(a.v, vec_nor(b.v,b.v))); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pandnot< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 298 of file SSE/PacketMath.h.

// NOTE(review): _mm_andnot_pd(x,y) computes (~x) & y, so this returns (~a) & b,
// not the documented a & ~b (cf. the AltiVec version) — confirm; later Eigen
// swaps the arguments.
{ return _mm_andnot_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pandnot< Packet4cf > ( const Packet4cf &  a,
const Packet4cf &  b 
)

Definition at line 73 of file AVX/Complex.h.

// NOTE(review): _mm256_andnot_ps(x,y) computes (~x) & y, so this returns (~a) & b,
// not the documented a & ~b (cf. the AltiVec version) — confirm; later Eigen
// swaps the arguments.
{ return Packet4cf(_mm256_andnot_ps(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pandnot< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 205 of file AVX/PacketMath.h.

// NOTE(review): _mm256_andnot_pd(x,y) computes (~x) & y, so this returns (~a) & b,
// not the documented a & ~b (cf. the AltiVec version) — confirm; later Eigen
// swaps the arguments.
{ return _mm256_andnot_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pandnot< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 391 of file AltiVec/PacketMath.h.

// AltiVec: vec_nor(b,b) == ~b, so this computes a & ~b as documented.
{ return vec_and(a, vec_nor(b, b)); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pandnot< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 392 of file AltiVec/PacketMath.h.

// AltiVec: vec_nor(b,b) == ~b, so this computes a & ~b as documented.
{ return vec_and(a, vec_nor(b, b)); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pandnot< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 204 of file AVX/PacketMath.h.

// NOTE(review): _mm256_andnot_ps(x,y) computes (~x) & y, so this returns (~a) & b,
// not the documented a & ~b (cf. the AltiVec version) — confirm; later Eigen
// swaps the arguments.
{ return _mm256_andnot_ps(a,b); }
template<bool Condition, typename Functor , typename Index >
void Eigen::internal::parallelize_gemm ( const Functor &  func,
Index  rows,
Index  cols,
bool  transpose 
)

Definition at line 86 of file Parallelizer.h.

{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / 32);
  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  // The splitting logic below always partitions along the column direction,
  // so for a transposed (row-major destination) product swap the roles.
  if(transpose)
    std::swap(rows,cols);
  
  // Per-thread bookkeeping shared between the workers.
  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
  
  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of request ones.
    Index actual_threads = omp_get_num_threads();
    
    Index blockCols = (cols / actual_threads) & ~Index(0x3);   // round down to a multiple of 4
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;  // round down to a multiple of the kernel height
  
    Index r0 = i*blockRows;
    // The last thread takes whatever remains after the rounded splits.
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    // Record this thread's lhs slice in the shared per-thread info array.
    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else          func(0, rows, c0, actualBlockCols, info);
  }
#endif
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::parg ( const Packet &  a) [inline]
Returns:
the phase angle of a

Definition at line 188 of file GenericPacketMath.h.

// Generic scalar fallback: defer to numext::arg for the phase angle.
{ using numext::arg; return arg(a); }
template<typename MatrixType , typename TranspositionType >
void Eigen::internal::partial_lu_inplace ( MatrixType &  lu,
TranspositionType &  row_transpositions,
typename TranspositionType::StorageIndex &  nb_transpositions 
)

performs the LU decomposition with partial pivoting in-place.

Definition at line 448 of file PartialPivLU.h.

{
  eigen_assert(lu.cols() == row_transpositions.size());
  // Check the transposition indices are stored contiguously (unit stride).
  // NOTE(review): taking the address of coeffRef(1) assumes size() >= 2 —
  // confirm callers never pass a 1x1 problem.
  eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);

  // Forward to the blocked implementation, selecting the storage order at
  // compile time from the matrix flags.
  partial_lu_impl
    <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>
    ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
}
/// \returns the arc sine of \a a (coeff-wise); generic scalar fallback.
/// Defined in GenericPacketMath.h (line 388).
template<typename Packet>
inline Packet pasin(const Packet& a)
{
  using std::asin;
  return asin(a);
}
/// \returns the arc tangent of \a a (coeff-wise); generic scalar fallback.
/// Defined in GenericPacketMath.h (line 396).
template<typename Packet>
inline Packet patan(const Packet& a)
{
  using std::atan;
  return atan(a);
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pblend ( const Selector< 2 > &  ifPacket,
const Packet2cf &  thenPacket,
const Packet2cf &  elsePacket 
)

Definition at line 474 of file SSE/Complex.h.

{
  // Reuse the double-precision blend: the two complex<float> lanes of a
  // Packet2cf bit-cast exactly onto the two double lanes of a Packet2d, so a
  // per-complex selection maps to a per-double selection.
  __m128d result = pblend<Packet2d>(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v));
  return Packet2cf(_mm_castpd_ps(result));
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pblend ( const Selector< unpacket_traits< Packet >::size > &  ifPacket,
const Packet &  thenPacket,
const Packet &  elsePacket 
) [inline]

Definition at line 579 of file GenericPacketMath.h.

                                                                                                                  {
  return ifPacket.select[0] ? thenPacket : elsePacket;
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pblend ( const Selector< 8 > &  ifPacket,
const Packet8f &  thenPacket,
const Packet8f &  elsePacket 
)

Definition at line 591 of file AVX/PacketMath.h.

                                                                                                                                    {
  const __m256 zero = _mm256_setzero_ps();
  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4d &  thenPacket,
const Packet4d &  elsePacket 
)

Definition at line 597 of file AVX/PacketMath.h.

                                                                                                                                    {
  const __m256d zero = _mm256_setzero_pd();
  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4i &  thenPacket,
const Packet4i &  elsePacket 
)

Definition at line 809 of file SSE/PacketMath.h.

                                                                                                                                    {
  const __m128i zero = _mm_setzero_si128();
  const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128i false_mask = _mm_cmpeq_epi32(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
#endif
}
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4f &  thenPacket,
const Packet4f &  elsePacket 
)

Definition at line 819 of file SSE/PacketMath.h.

                                                                                                                                    {
  const __m128 zero = _mm_setzero_ps();
  const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128 false_mask = _mm_cmpeq_ps(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_ps(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));
#endif
}
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pblend ( const Selector< 2 > &  ifPacket,
const Packet2d &  thenPacket,
const Packet2d &  elsePacket 
)

Definition at line 829 of file SSE/PacketMath.h.

                                                                                                                                    {
  const __m128d zero = _mm_setzero_pd();
  const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);
  __m128d false_mask = _mm_cmpeq_pd(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_pd(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));
#endif
}
template<typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::pbroadcast2 ( const typename unpacket_traits< Packet >::type a,
Packet &  a0,
Packet &  a1 
) [inline]

equivalent to

 a0 = pload1(a+0);
 a1 = pload1(a+1);
See also:
pset1, pload1, ploaddup, pbroadcast4

Definition at line 267 of file GenericPacketMath.h.

{
  a0 = pload1<Packet>(a+0);
  a1 = pload1<Packet>(a+1);
}
template<typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::pbroadcast4 ( const typename unpacket_traits< Packet >::type a,
Packet &  a0,
Packet &  a1,
Packet &  a2,
Packet &  a3 
) [inline]

equivalent to

 a0 = pload1(a+0);
 a1 = pload1(a+1);
 a2 = pload1(a+2);
 a3 = pload1(a+3);
See also:
pset1, pload1, ploaddup, pbroadcast2

Definition at line 250 of file GenericPacketMath.h.

{
  a0 = pload1<Packet>(a+0);
  a1 = pload1<Packet>(a+1);
  a2 = pload1<Packet>(a+2);
  a3 = pload1<Packet>(a+3);
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet2d > ( const double *  a,
Packet2d &  a0,
Packet2d &  a1,
Packet2d &  a2,
Packet2d &  a3 
)

Definition at line 494 of file SSE/PacketMath.h.

{
#ifdef EIGEN_VECTORIZE_SSE3
  a0 = _mm_loaddup_pd(a+0);
  a1 = _mm_loaddup_pd(a+1);
  a2 = _mm_loaddup_pd(a+2);
  a3 = _mm_loaddup_pd(a+3);
#else
  a1 = pload<Packet2d>(a);
  a0 = vec2d_swizzle1(a1, 0,0);
  a1 = vec2d_swizzle1(a1, 1,1);
  a3 = pload<Packet2d>(a+2);
  a2 = vec2d_swizzle1(a3, 0,0);
  a3 = vec2d_swizzle1(a3, 1,1);
#endif
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet4f > ( const float *  a,
Packet4f &  a0,
Packet4f &  a1,
Packet4f &  a2,
Packet4f &  a3 
)

Definition at line 240 of file AltiVec/PacketMath.h.

{
  a3 = pload<Packet4f>(a);
  a0 = vec_splat(a3, 0);
  a1 = vec_splat(a3, 1);
  a2 = vec_splat(a3, 2);
  a3 = vec_splat(a3, 3);
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet4i > ( const int *  a,
Packet4i &  a0,
Packet4i &  a1,
Packet4i &  a2,
Packet4i &  a3 
)

Definition at line 250 of file AltiVec/PacketMath.h.

{
  a3 = pload<Packet4i>(a);
  a0 = vec_splat(a3, 0);
  a1 = vec_splat(a3, 1);
  a2 = vec_splat(a3, 2);
  a3 = vec_splat(a3, 3);
}
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket Eigen::internal::pcast ( const SrcPacket &  a) [inline]
Returns:
static_cast<TgtType>(a) (coeff-wise)

Definition at line 128 of file GenericPacketMath.h.

                          {
  return static_cast<TgtPacket>(a);
}
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket Eigen::internal::pcast ( const SrcPacket &  a,
const SrcPacket &   
) [inline]

Definition at line 133 of file GenericPacketMath.h.

                                              {
  return static_cast<TgtPacket>(a);
}
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket Eigen::internal::pcast ( const SrcPacket &  a,
const SrcPacket &  ,
const SrcPacket &  ,
const SrcPacket &   
) [inline]

Definition at line 139 of file GenericPacketMath.h.

                                                                                      {
  return static_cast<TgtPacket>(a);
}
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pcast< Packet2d, Packet4f > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 54 of file SSE/TypeCasting.h.

                                                                                                        {
  return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6));
}
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pcast< Packet4f, Packet2d > ( const Packet4f &  a)

Definition at line 67 of file SSE/TypeCasting.h.

                                                                                     {
  // Simply discard the second half of the input
  return _mm_cvtps_pd(a);
}
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pcast< Packet4f, Packet4i > ( const Packet4f &  a)

Definition at line 26 of file SSE/TypeCasting.h.

                                                                                     {
  return _mm_cvttps_epi32(a);
}
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pcast< Packet4i, Packet4f > ( const Packet4i &  a)

Definition at line 40 of file SSE/TypeCasting.h.

                                                                                     {
  return _mm_cvtepi32_ps(a);
}
template<>
EIGEN_STRONG_INLINE Packet8i Eigen::internal::pcast< Packet8f, Packet8i > ( const Packet8f &  a)

Definition at line 39 of file AVX/TypeCasting.h.

                                                                                     {
  return _mm256_cvtps_epi32(a);
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pcast< Packet8i, Packet8f > ( const Packet8i &  a)

Definition at line 43 of file AVX/TypeCasting.h.

                                                                                     {
  return _mm256_cvtepi32_ps(a);
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pceil ( const Packet &  a)
Returns:
the ceil of a (coeff-wise)

Definition at line 442 of file GenericPacketMath.h.

{ using numext::ceil; return ceil(a); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pceil< Packet4d > ( const Packet4d &  a)

Definition at line 190 of file AVX/PacketMath.h.

{ return _mm256_ceil_pd(a); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pceil< Packet8f > ( const Packet8f &  a)

Definition at line 189 of file AVX/PacketMath.h.

{ return _mm256_ceil_ps(a); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pconj ( const Packet4cf &  a)

Definition at line 56 of file AVX/Complex.h.

{
  const __m256 mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000));
  return Packet4cf(_mm256_xor_ps(a.v,mask));
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pconj ( const Packet2cf &  a)

Definition at line 89 of file AltiVec/Complex.h.

{ return Packet2cf((Packet4f)vec_xor((Packet4ui)a.v, p4ui_CONJ_XOR)); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pconj ( const Packet8f &  a)

Definition at line 139 of file AVX/PacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pconj ( const Packet4d &  a)

Definition at line 140 of file AVX/PacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE Packet8i Eigen::internal::pconj ( const Packet8i &  a)

Definition at line 141 of file AVX/PacketMath.h.

{ return a; }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pconj ( const Packet &  a) [inline]
Returns:
conj(a) (coeff-wise)

Definition at line 160 of file GenericPacketMath.h.

{ return numext::conj(a); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pconj ( const Packet2d &  a)

Definition at line 217 of file SSE/PacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pconj ( const Packet2cd &  a)

Definition at line 275 of file AVX/Complex.h.

{
  const __m256d mask = _mm256_castsi256_pd(_mm256_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0));
  return Packet2cd(_mm256_xor_pd(a.v,mask));
}
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pconj ( const Packet1cd &  a)

Definition at line 304 of file SSE/Complex.h.

{
  const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
  return Packet1cd(_mm_xor_pd(a.v,mask));
}
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pconj ( const Packet4f &  a)

Definition at line 309 of file AltiVec/PacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pconj ( const Packet4i &  a)

Definition at line 310 of file AltiVec/PacketMath.h.

{ return a; }
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pcos ( const Packet &  a)
Returns:
the cosine of a (coeff-wise)

Definition at line 380 of file GenericPacketMath.h.

{ using std::cos; return cos(a); }

Definition at line 359 of file arch/SSE/MathFunctions.h.

{
  Packet4f x = _x;
  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);

  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
  _EIGEN_DECLARE_CONST_Packet4i(4, 4);

  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI

  Packet4f xmm1, xmm2, xmm3, y;
  Packet4i emm0, emm2;

  x = pabs(x);

  /* scale by 4/Pi */
  y = pmul(x, p4f_cephes_FOPI);

  /* get the integer part of y */
  emm2 = _mm_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  emm2 = _mm_add_epi32(emm2, p4i_1);
  emm2 = _mm_and_si128(emm2, p4i_not1);
  y = _mm_cvtepi32_ps(emm2);

  emm2 = _mm_sub_epi32(emm2, p4i_2);

  /* get the swap sign flag */
  emm0 = _mm_andnot_si128(emm2, p4i_4);
  emm0 = _mm_slli_epi32(emm0, 29);
  /* get the polynom selection mask */
  emm2 = _mm_and_si128(emm2, p4i_2);
  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

  Packet4f sign_bit = _mm_castsi128_ps(emm0);
  Packet4f poly_mask = _mm_castsi128_ps(emm2);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = pmul(y, p4f_minus_cephes_DP1);
  xmm2 = pmul(y, p4f_minus_cephes_DP2);
  xmm3 = pmul(y, p4f_minus_cephes_DP3);
  x = padd(x, xmm1);
  x = padd(x, xmm2);
  x = padd(x, xmm3);

  /* Evaluate the first polynomial  (0 <= x <= Pi/4) */
  y = p4f_coscof_p0;
  Packet4f z = pmul(x,x);

  y = pmadd(y,z,p4f_coscof_p1);
  y = pmadd(y,z,p4f_coscof_p2);
  y = pmul(y, z);
  y = pmul(y, z);
  Packet4f tmp = _mm_mul_ps(z, p4f_half);
  y = psub(y, tmp);
  y = padd(y, p4f_1);

  /* Evaluate the second polynomial  (Pi/4 <= x <= Pi/2) */
  Packet4f y2 = p4f_sincof_p0;
  y2 = pmadd(y2, z, p4f_sincof_p1);
  y2 = pmadd(y2, z, p4f_sincof_p2);
  y2 = pmul(y2, z);
  y2 = pmadd(y2, x, x);

  /* select the correct result from the two polynomials */
  y2 = _mm_and_ps(poly_mask, y2);
  y  = _mm_andnot_ps(poly_mask, y);
  y  = _mm_or_ps(y,y2);

  /* update the sign */
  return _mm_xor_ps(y, sign_bit);
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pcosh ( const Packet &  a)
Returns:
the hyperbolic cosine of a (coeff-wise)

Definition at line 404 of file GenericPacketMath.h.

{ using std::cosh; return cosh(a); }

Definition at line 258 of file SSE/Complex.h.

{
  return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2));
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pcplxflip ( const Packet &  a) [inline]
Returns:
a with real and imaginary part flipped (for complex type only)

Definition at line 362 of file GenericPacketMath.h.

{
  // FIXME: uncomment the following in case we drop the internal imag and real functions.
//   using std::imag;
//   using std::real;
  return Packet(imag(a),real(a));
}

Definition at line 459 of file SSE/Complex.h.

{
  return Packet1cd(preverse(Packet2d(x.v)));
}
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pcplxflip< Packet2cd > ( const Packet2cd &  x)

Definition at line 429 of file AVX/Complex.h.

{
  return Packet2cd(_mm256_shuffle_pd(x.v, x.v, 0x5));
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pcplxflip< Packet2cf > ( const Packet2cf &  x)

Definition at line 234 of file AltiVec/Complex.h.

{
  return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV));
}
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pcplxflip< Packet4cf > ( const Packet4cf &  x)

Definition at line 234 of file AVX/Complex.h.

{
  return Packet4cf(_mm256_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0 ,1)));
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pdigamma ( const Packet &  a)
Returns:
the derivative of lgamma, psi(a) (coeff-wise)

Definition at line 450 of file GenericPacketMath.h.

{ using numext::digamma; return digamma(a); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pdiv ( const Packet &  a,
const Packet &  b 
) [inline]
Returns:
a / b (coeff-wise)

Definition at line 169 of file GenericPacketMath.h.

                         { return a/b; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pdiv< Packet1cd > ( const Packet1cd &  a,
const Packet1cd &  b 
)

Definition at line 451 of file SSE/Complex.h.

{
  // TODO optimize it for SSE3 and 4
  Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);
  __m128d s = _mm_mul_pd(b.v,b.v);
  return Packet1cd(_mm_div_pd(res.v, _mm_add_pd(s,_mm_shuffle_pd(s, s, 0x1))));
}
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pdiv< Packet2cd > ( const Packet2cd &  a,
const Packet2cd &  b 
)

Definition at line 421 of file AVX/Complex.h.

{
  Packet2cd num = pmul(a, pconj(b));
  __m256d tmp = _mm256_mul_pd(b.v, b.v);
  __m256d denom = _mm256_hadd_pd(tmp, tmp);
  return Packet2cd(_mm256_div_pd(num.v, denom));
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pdiv< Packet2cf > ( const Packet2cf &  a,
const Packet2cf &  b 
)

Definition at line 226 of file AltiVec/Complex.h.

{
  // TODO optimize it for AltiVec
  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);
  Packet4f s = vec_madd(b.v, b.v, p4f_ZERO);
  return Packet2cf(pdiv(res.v, vec_add(s,vec_perm(s, s, p16uc_COMPLEX32_REV))));
}
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pdiv< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 239 of file SSE/PacketMath.h.

{ return _mm_div_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pdiv< Packet4cf > ( const Packet4cf &  a,
const Packet4cf &  b 
)

Definition at line 225 of file AVX/Complex.h.

{
  Packet4cf num = pmul(a, pconj(b));
  __m256 tmp = _mm256_mul_ps(b.v, b.v);
  __m256 tmp2    = _mm256_shuffle_ps(tmp,tmp,0xB1);
  __m256 denom = _mm256_add_ps(tmp, tmp2);
  return Packet4cf(_mm256_div_ps(num.v, denom));
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pdiv< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 148 of file AVX/PacketMath.h.

{ return _mm256_div_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pdiv< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 349 of file AltiVec/PacketMath.h.

{
#ifndef __VSX__  // VSX actually provides a div instruction
  Packet4f t, y_0, y_1;

  // Altivec does not offer a divide instruction, so we have to do a reciprocal approximation
  y_0 = vec_re(b);

  // Do one Newton-Raphson iteration to get the needed accuracy
  t   = vec_nmsub(y_0, b, p4f_ONE);
  y_1 = vec_madd(y_0, t, y_0);

  return vec_madd(a, y_1, p4f_ZERO);
#else
  return vec_div(a, b);
#endif
}
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pdiv< Packet4i > ( const Packet4i &  ,
const Packet4i &   
)

Definition at line 367 of file AltiVec/PacketMath.h.

{ eigen_assert(false && "packet integer division are not supported by AltiVec");
  return pset1<Packet4i>(0);
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pdiv< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 147 of file AVX/PacketMath.h.

{ return _mm256_div_ps(a,b); }
template<>
EIGEN_STRONG_INLINE Packet8i Eigen::internal::pdiv< Packet8i > ( const Packet8i &  ,
const Packet8i &   
)

Definition at line 149 of file AVX/PacketMath.h.

{ eigen_assert(false && "packet integer division are not supported by AVX");
  return pset1<Packet8i>(0);
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::perf ( const Packet &  a)
Returns:
the erf(a) (coeff-wise)

Definition at line 454 of file GenericPacketMath.h.

{ using numext::erf; return erf(a); }
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::perfc ( const Packet &  a)
Returns:
the erfc(a) (coeff-wise)

Definition at line 458 of file GenericPacketMath.h.

{ using numext::erfc; return erfc(a); }
template<int Mode, typename MatrixType , int DestOrder>
void Eigen::internal::permute_symm_to_fullsymm ( const MatrixType &  mat,
SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &  _dest,
const typename MatrixType::StorageIndex *  perm = 0 
)

Definition at line 384 of file SparseSelfAdjointView.h.

{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
  
  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };
  
  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();
  
  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];
  
  // copy data
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();
      
      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;
      
      if(Mode==(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}
template<int SrcMode, int DstMode, typename MatrixType , int DestOrder>
void Eigen::internal::permute_symm_to_symm ( const MatrixType &  mat,
SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &  _dest,
const typename MatrixType::StorageIndex *  perm = 0 
)
template<int _SrcMode, int _DstMode, typename MatrixType , int DstOrder>
void Eigen::internal::permute_symm_to_symm ( const MatrixType &  mat,
SparseMatrix< typename MatrixType::Scalar, DstOrder, typename MatrixType::StorageIndex > &  _dest,
const typename MatrixType::StorageIndex *  perm 
)

Definition at line 474 of file SparseSelfAdjointView.h.

{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };

  MatEval matEval(mat);
  
  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;
                  
      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];
  
  for(StorageIndex j = 0; j<size; ++j)
  {
    
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;
                  
      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm? perm[i] : i;
      
      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
      
      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pexp ( const Packet &  a)
Returns:
the exp of a (coeff-wise)

Definition at line 412 of file GenericPacketMath.h.

{ using std::exp; return exp(a); }

Definition at line 172 of file arch/SSE/MathFunctions.h.

{
  Packet2d x = _x;

  _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);
  _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);
  _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);

  _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);
  _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);

  _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);

  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);

  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);

  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
  static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);

  Packet2d tmp, fx;
  Packet4i emm0;

  // clamp x
  x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
  /* express exp(x) as exp(g + n*log(2)) */
  fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);

#ifdef EIGEN_VECTORIZE_SSE4_1
  fx = _mm_floor_pd(fx);
#else
  emm0 = _mm_cvttpd_epi32(fx);
  tmp  = _mm_cvtepi32_pd(emm0);
  /* if greater, subtract 1 */
  Packet2d mask = _mm_cmpgt_pd(tmp, fx);
  mask = _mm_and_pd(mask, p2d_1);
  fx = psub(tmp, mask);
#endif

  tmp = pmul(fx, p2d_cephes_exp_C1);
  Packet2d z = pmul(fx, p2d_cephes_exp_C2);
  x = psub(x, tmp);
  x = psub(x, z);

  Packet2d x2 = pmul(x,x);

  Packet2d px = p2d_cephes_exp_p0;
  px = pmadd(px, x2, p2d_cephes_exp_p1);
  px = pmadd(px, x2, p2d_cephes_exp_p2);
  px = pmul (px, x);

  Packet2d qx = p2d_cephes_exp_q0;
  qx = pmadd(qx, x2, p2d_cephes_exp_q1);
  qx = pmadd(qx, x2, p2d_cephes_exp_q2);
  qx = pmadd(qx, x2, p2d_cephes_exp_q3);

  x = pdiv(px,psub(qx,px));
  x = pmadd(p2d_2,x,p2d_1);

  // build 2^n
  emm0 = _mm_cvttpd_epi32(fx);
  emm0 = _mm_add_epi32(emm0, p4i_1023_0);
  emm0 = _mm_slli_epi32(emm0, 20);
  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
  return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
}

Definition at line 319 of file arch/AVX/MathFunctions.h.

                                   {
  Packet4d x = _x;

  _EIGEN_DECLARE_CONST_Packet4d(1, 1.0);
  _EIGEN_DECLARE_CONST_Packet4d(2, 2.0);
  _EIGEN_DECLARE_CONST_Packet4d(half, 0.5);

  _EIGEN_DECLARE_CONST_Packet4d(exp_hi, 709.437);
  _EIGEN_DECLARE_CONST_Packet4d(exp_lo, -709.436139303);

  _EIGEN_DECLARE_CONST_Packet4d(cephes_LOG2EF, 1.4426950408889634073599);

  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p0, 1.26177193074810590878e-4);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p1, 3.02994407707441961300e-2);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p2, 9.99999999999999999910e-1);

  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q0, 3.00198505138664455042e-6);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q1, 2.52448340349684104192e-3);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q2, 2.27265548208155028766e-1);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q3, 2.00000000000000000009e0);

  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C1, 0.693145751953125);
  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C2, 1.42860682030941723212e-6);
  _EIGEN_DECLARE_CONST_Packet4i(1023, 1023);

  Packet4d tmp, fx;

  // clamp x
  x = pmax(pmin(x, p4d_exp_hi), p4d_exp_lo);
  // Express exp(x) as exp(g + n*log(2)).
  fx = pmadd(p4d_cephes_LOG2EF, x, p4d_half);

  // Get the integer modulus of log(2), i.e. the "n" described above.
  fx = _mm256_floor_pd(fx);

  // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
  // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
  // digits right.
  tmp = pmul(fx, p4d_cephes_exp_C1);
  Packet4d z = pmul(fx, p4d_cephes_exp_C2);
  x = psub(x, tmp);
  x = psub(x, z);

  Packet4d x2 = pmul(x, x);

  // Evaluate the numerator polynomial of the rational interpolant.
  Packet4d px = p4d_cephes_exp_p0;
  px = pmadd(px, x2, p4d_cephes_exp_p1);
  px = pmadd(px, x2, p4d_cephes_exp_p2);
  px = pmul(px, x);

  // Evaluate the denominator polynomial of the rational interpolant.
  Packet4d qx = p4d_cephes_exp_q0;
  qx = pmadd(qx, x2, p4d_cephes_exp_q1);
  qx = pmadd(qx, x2, p4d_cephes_exp_q2);
  qx = pmadd(qx, x2, p4d_cephes_exp_q3);

  // I don't really get this bit, copied from the SSE2 routines, so...
  // TODO(gonnet): Figure out what is going on here, perhaps find a better
  // rational interpolant?
  x = _mm256_div_pd(px, psub(qx, px));
  x = pmadd(p4d_2, x, p4d_1);

  // Build e=2^n by constructing the exponents in a 128-bit vector and
  // shifting them to where they belong in double-precision values.
  __m128i emm0 = _mm256_cvtpd_epi32(fx);
  emm0 = _mm_add_epi32(emm0, p4i_1023);
  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(3, 1, 2, 0));
  __m128i lo = _mm_slli_epi64(emm0, 52);
  __m128i hi = _mm_slli_epi64(_mm_srli_epi64(emm0, 32), 52);
  __m256i e = _mm256_insertf128_si256(_mm256_setzero_si256(), lo, 0);
  e = _mm256_insertf128_si256(e, hi, 1);

  // Construct the result 2^n * exp(g) = e * x. The max is used to catch
  // non-finite values in the input.
  return pmax(pmul(x, _mm256_castsi256_pd(e)), _x);
}

Definition at line 112 of file arch/AltiVec/MathFunctions.h.

{
  Packet4f x = _x;
  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
  _EIGEN_DECLARE_CONST_Packet4i(23, 23);


  _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);
  _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);

  _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);

  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);

  Packet4f tmp, fx;
  Packet4i emm0;

  // clamp x
  x = vec_max(vec_min(x, p4f_exp_hi), p4f_exp_lo);

  /* express exp(x) as exp(g + n*log(2)) */
  fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);

  fx = vec_floor(fx);

  tmp = pmul(fx, p4f_cephes_exp_C1);
  Packet4f z = pmul(fx, p4f_cephes_exp_C2);
  x = psub(x, tmp);
  x = psub(x, z);

  z = pmul(x,x);

  Packet4f y = p4f_cephes_exp_p0;
  y = pmadd(y, x, p4f_cephes_exp_p1);
  y = pmadd(y, x, p4f_cephes_exp_p2);
  y = pmadd(y, x, p4f_cephes_exp_p3);
  y = pmadd(y, x, p4f_cephes_exp_p4);
  y = pmadd(y, x, p4f_cephes_exp_p5);
  y = pmadd(y, z, x);
  y = padd(y, p4f_1);

  // build 2^n
  emm0 = vec_cts(fx, 0);
  emm0 = vec_add(emm0, p4i_0x7f);
  emm0 = vec_sl(emm0, reinterpret_cast<Packet4ui>(p4i_23));

  // Altivec's max & min operators just drop silent NaNs. Check NaNs in 
  // inputs and return them unmodified.
  Packet4ui isnumber_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(_x, _x));
  return vec_sel(_x, pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x),
                 isnumber_mask);
}

Definition at line 209 of file arch/AVX/MathFunctions.h.

                                   {
  // Vectorized exp() for 8 packed floats (AVX): Cephes-style range
  // reduction exp(x) = 2^m * exp(r) with |r| <= ln(2)/2, followed by a
  // degree-5 polynomial approximation of exp(r).
  _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);
  _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);
  _EIGEN_DECLARE_CONST_Packet8f(127, 127.0f);

  // Overflow/underflow thresholds: outside [exp_lo, exp_hi] float exp()
  // saturates, so the input is clamped to this range first.
  _EIGEN_DECLARE_CONST_Packet8f(exp_hi, 88.3762626647950f);
  _EIGEN_DECLARE_CONST_Packet8f(exp_lo, -88.3762626647949f);

  // log2(e), used to compute m = round(x / ln(2)).
  _EIGEN_DECLARE_CONST_Packet8f(cephes_LOG2EF, 1.44269504088896341f);

  // Cephes polynomial coefficients for exp(r) on the reduced range.
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p0, 1.9875691500E-4f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p1, 1.3981999507E-3f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p2, 8.3334519073E-3f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p3, 4.1665795894E-2f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p4, 1.6666665459E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p5, 5.0000001201E-1f);

  // Clamp x.
  Packet8f x = pmax(pmin(_x, p8f_exp_hi), p8f_exp_lo);

  // Express exp(x) as exp(m*ln(2) + r), start by extracting
  // m = floor(x/ln(2) + 0.5).
  Packet8f m = _mm256_floor_ps(pmadd(x, p8f_cephes_LOG2EF, p8f_half));

// Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is
// subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating
// truncation errors. Note that we don't use the "pmadd" function here to
// ensure that a precision-preserving FMA instruction is used.
#ifdef EIGEN_VECTORIZE_FMA
  _EIGEN_DECLARE_CONST_Packet8f(nln2, -0.6931471805599453f);
  Packet8f r = _mm256_fmadd_ps(m, p8f_nln2, x);
#else
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C1, 0.693359375f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C2, -2.12194440e-4f);
  Packet8f r = psub(x, pmul(m, p8f_cephes_exp_C1));
  r = psub(r, pmul(m, p8f_cephes_exp_C2));
#endif

  Packet8f r2 = pmul(r, r);

  // TODO(gonnet): Split into odd/even polynomials and try to exploit
  //               instruction-level parallelism.
  // Horner evaluation of the degree-5 polynomial, then the final
  // exp(r) ~= 1 + r + r^2 * p(r) recombination.
  Packet8f y = p8f_cephes_exp_p0;
  y = pmadd(y, r, p8f_cephes_exp_p1);
  y = pmadd(y, r, p8f_cephes_exp_p2);
  y = pmadd(y, r, p8f_cephes_exp_p3);
  y = pmadd(y, r, p8f_cephes_exp_p4);
  y = pmadd(y, r, p8f_cephes_exp_p5);
  y = pmadd(y, r2, r);
  y = padd(y, p8f_1);

  // Build emm0 = 2^m.
  // Add the IEEE-754 bias (127) and shift into the exponent field.
  Packet8i emm0 = _mm256_cvttps_epi32(padd(m, p8f_127));
  emm0 = pshiftleft(emm0, 23);

  // Return 2^m * exp(r).
  // NOTE(review): the max with the raw input _x appears intended to let
  // NaN inputs propagate (AVX max returns its second operand on an
  // unordered compare) -- confirm against pmax<Packet8f> semantics.
  return pmax(pmul(y, _mm256_castsi256_ps(emm0)), _x);
}
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits<Packet>::type Eigen::internal::pfirst ( const Packet &  a) [inline]
Returns:
the first element of a packet

Definition at line 309 of file GenericPacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE std::complex<double> Eigen::internal::pfirst< Packet1cd > ( const Packet1cd &  a)

Definition at line 345 of file SSE/Complex.h.

{
  // Spill the packet to an aligned scratch buffer and rebuild the scalar
  // complex from its (real, imag) pair.
  EIGEN_ALIGN16 double res[2];
  _mm_store_pd(res, a.v);
  return std::complex<double>(res[0],res[1]);
}
template<>
EIGEN_STRONG_INLINE std::complex<double> Eigen::internal::pfirst< Packet2cd > ( const Packet2cd &  a)

Definition at line 327 of file AVX/Complex.h.

{
  // The first complex<double> occupies the low 128 bits of the AVX register.
  __m128d low = _mm256_extractf128_pd(a.v, 0);
  EIGEN_ALIGN16 double res[2];
  _mm_store_pd(res, low);
  return std::complex<double>(res[0],res[1]);
}
template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::pfirst< Packet2cf > ( const Packet2cf &  a)

Definition at line 128 of file AltiVec/Complex.h.

{
  // Store both complex lanes to an aligned buffer and return the first one.
  std::complex<float> EIGEN_ALIGN16 res[2];
  pstore((float *)&res, a.v);

  return res[0];
}
template<>
EIGEN_STRONG_INLINE double Eigen::internal::pfirst< Packet2d > ( const Packet2d &  a)

Definition at line 426 of file SSE/PacketMath.h.

{ return _mm_cvtsd_f64(a); }
template<>
EIGEN_STRONG_INLINE std::complex<float> Eigen::internal::pfirst< Packet4cf > ( const Packet4cf &  a)

Definition at line 119 of file AVX/Complex.h.

{
  // The first complex<float> lies in the low 128 bits; reuse the Packet2cf path.
  return pfirst(Packet2cf(_mm256_castps256_ps128(a.v)));
}
template<>
EIGEN_STRONG_INLINE double Eigen::internal::pfirst< Packet4d > ( const Packet4d &  a)

Definition at line 311 of file AVX/PacketMath.h.

                                                                          {
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::pfirst< Packet4f > ( const Packet4f &  a)

Definition at line 502 of file AltiVec/PacketMath.h.

{ float EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; }
template<>
EIGEN_STRONG_INLINE int Eigen::internal::pfirst< Packet4i > ( const Packet4i &  a)

Definition at line 503 of file AltiVec/PacketMath.h.

{ int   EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; }
template<>
EIGEN_STRONG_INLINE float Eigen::internal::pfirst< Packet8f > ( const Packet8f &  a)

Definition at line 308 of file AVX/PacketMath.h.

                                                                          {
  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
}
template<>
EIGEN_STRONG_INLINE int Eigen::internal::pfirst< Packet8i > ( const Packet8i &  a)

Definition at line 314 of file AVX/PacketMath.h.

                                                                          {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pfloor ( const Packet &  a)
Returns:
the floor of a (coeff-wise)

Definition at line 438 of file GenericPacketMath.h.

{ using numext::floor; return floor(a); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pfloor< Packet4d > ( const Packet4d &  a)

Definition at line 193 of file AVX/PacketMath.h.

{ return _mm256_floor_pd(a); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pfloor< Packet8f > ( const Packet8f &  a)

Definition at line 192 of file AVX/PacketMath.h.

{ return _mm256_floor_ps(a); }
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pgather ( const Scalar *  from,
Index   
) [inline]

Definition at line 286 of file GenericPacketMath.h.

 { return ploadu<Packet>(from); }
template<>
EIGEN_DEVICE_FUNC Packet2d Eigen::internal::pgather< double, Packet2d > ( const double *  from,
Index  stride 
) [inline]

Definition at line 365 of file SSE/PacketMath.h.

{
 // _mm_set_pd takes operands high-to-low, so lane 0 gets from[0].
 return _mm_set_pd(from[1*stride], from[0*stride]);
}
template<>
EIGEN_DEVICE_FUNC Packet4d Eigen::internal::pgather< double, Packet4d > ( const double *  from,
Index  stride 
) [inline]

Definition at line 259 of file AVX/PacketMath.h.

{
  // _mm256_set_pd takes operands high-to-low, so lane 0 gets from[0].
  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<>
EIGEN_DEVICE_FUNC Packet4f Eigen::internal::pgather< float, Packet4f > ( const float *  from,
Index  stride 
) [inline]

Definition at line 260 of file AltiVec/PacketMath.h.

{
  // Gather four strided floats into an aligned staging buffer, then issue
  // a single aligned vector load.
  float EIGEN_ALIGN16 buf[4];
  for (int k = 0; k < 4; ++k)
    buf[k] = from[k * stride];
  return pload<Packet4f>(buf);
}
template<>
EIGEN_DEVICE_FUNC Packet8f Eigen::internal::pgather< float, Packet8f > ( const float *  from,
Index  stride 
) [inline]

Definition at line 254 of file AVX/PacketMath.h.

{
  // _mm256_set_ps takes operands high-to-low, so lane 0 gets from[0].
  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<>
EIGEN_DEVICE_FUNC Packet4i Eigen::internal::pgather< int, Packet4i > ( const int *  from,
Index  stride 
) [inline]

Definition at line 269 of file AltiVec/PacketMath.h.

{
  // Gather four strided ints into an aligned staging buffer, then issue
  // a single aligned vector load.
  int EIGEN_ALIGN16 buf[4];
  for (int k = 0; k < 4; ++k)
    buf[k] = from[k * stride];
  return pload<Packet4i>(buf);
}
template<>
EIGEN_DEVICE_FUNC Packet2cd Eigen::internal::pgather< std::complex< double >, Packet2cd > ( const std::complex< double > *  from,
Index  stride 
) [inline]

Definition at line 313 of file AVX/Complex.h.

{
  // Interleave (real, imag) of the two strided complex<double> values;
  // _mm256_set_pd takes operands high-to-low.
  return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]),
                 std::imag(from[0*stride]), std::real(from[0*stride])));
}
template<>
EIGEN_DEVICE_FUNC Packet2cf Eigen::internal::pgather< std::complex< float >, Packet2cf > ( const std::complex< float > *  from,
Index  stride 
) [inline]

Definition at line 70 of file AltiVec/Complex.h.

{
  // Stage the two strided complex<float> values in an aligned buffer, then
  // load the four underlying floats with one aligned vector load.
  std::complex<float> EIGEN_ALIGN16 af[2];
  af[0] = from[0*stride];
  af[1] = from[1*stride];
  return Packet2cf(vec_ld(0, (const float*)af));
}
template<>
EIGEN_DEVICE_FUNC Packet4cf Eigen::internal::pgather< std::complex< float >, Packet4cf > ( const std::complex< float > *  from,
Index  stride 
) [inline]

Definition at line 95 of file AVX/Complex.h.

{
  // Interleave (real, imag) of the four strided complex<float> values;
  // _mm256_set_ps takes operands high-to-low.
  return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]),
                                 std::imag(from[2*stride]), std::real(from[2*stride]),
                                 std::imag(from[1*stride]), std::real(from[1*stride]),
                                 std::imag(from[0*stride]), std::real(from[0*stride])));
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::plgamma ( const Packet &  a)
Returns:
the ln(|gamma(a)|) (coeff-wise)

Definition at line 446 of file GenericPacketMath.h.

{ using numext::lgamma; return lgamma(a); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pload ( const typename unpacket_traits< Packet >::type from) [inline]
Returns:
a packet version of *from, from must be 16 bytes aligned

Definition at line 208 of file GenericPacketMath.h.

{ return *from; }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pload1 ( const typename unpacket_traits< Packet >::type a) [inline]
Returns:
a packet with constant coefficients a[0], e.g.: (a[0],a[0],a[0],a[0])

Definition at line 220 of file GenericPacketMath.h.

{ return pset1<Packet>(*a); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pload1< Packet4d > ( const double *  from)

Definition at line 119 of file AVX/PacketMath.h.

{ return _mm256_broadcast_sd(from); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pload1< Packet8f > ( const float *  from)

Definition at line 118 of file AVX/PacketMath.h.

{ return _mm256_broadcast_ss(from); }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pload< Packet1cd > ( const std::complex< double > *  from)

Definition at line 330 of file SSE/Complex.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pload< Packet2cd > ( const std::complex< double > *  from)

Definition at line 296 of file AVX/Complex.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cd(pload<Packet4d>((const double*)from)); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pload< Packet2cf > ( const std::complex< float > *  from)

Definition at line 115 of file AltiVec/Complex.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pload< Packet2d > ( const double *  from)

Definition at line 302 of file SSE/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pload< Packet4cf > ( const std::complex< float > *  from)

Definition at line 75 of file AVX/Complex.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return Packet4cf(pload<Packet8f>(&numext::real_ref(*from))); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pload< Packet4d > ( const double *  from)

Definition at line 208 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pload< Packet4f > ( const float *  from)

Definition at line 217 of file AltiVec/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pload< Packet4i > ( const int *  from)

Definition at line 218 of file AltiVec/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pload< Packet8f > ( const float *  from)

Definition at line 207 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
template<>
EIGEN_STRONG_INLINE Packet8i Eigen::internal::pload< Packet8i > ( const int *  from)

Definition at line 209 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::ploaddup ( const typename unpacket_traits< Packet >::type from) [inline]
Returns:
a packet with elements of *from duplicated. For instance, for a packet of 8 elements, 4 scalars will be read from *from and duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]} Currently, this function is only used for scalar * complex products.

Definition at line 228 of file GenericPacketMath.h.

{ return *from; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::ploaddup< Packet1cd > ( const std::complex< double > *  from)

Definition at line 337 of file SSE/Complex.h.

{ return pset1<Packet1cd>(*from); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::ploaddup< Packet2cd > ( const std::complex< double > *  from)

Definition at line 308 of file AVX/Complex.h.

{ return pset1<Packet2cd>(*from); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploaddup< Packet2cf > ( const std::complex< float > *  from)

Definition at line 118 of file AltiVec/Complex.h.

{
  return pset1<Packet2cf>(*from);
}
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::ploaddup< Packet2d > ( const double *  from)

Definition at line 344 of file SSE/PacketMath.h.

{ return pset1<Packet2d>(from[0]); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::ploaddup< Packet4cf > ( const std::complex< float > *  from)

Definition at line 84 of file AVX/Complex.h.

{
  // FIXME The following might be optimized using _mm256_movedup_pd
  // Duplicate each of the two complex values into a 128-bit half, then
  // concatenate the halves into one 256-bit packet.
  Packet2cf a = ploaddup<Packet2cf>(from);
  Packet2cf b = ploaddup<Packet2cf>(from+1);
  return  Packet4cf(_mm256_insertf128_ps(_mm256_castps128_ps256(a.v), b.v, 1));
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::ploaddup< Packet4d > ( const double *  from)

Definition at line 231 of file AVX/PacketMath.h.

{
  // Broadcast the 128-bit pair {from[0],from[1]} to both halves, then
  // permute to obtain {from[0],from[0],from[1],from[1]}.
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  return  _mm256_permute_pd(tmp, 3<<2);
}
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::ploaddup< Packet4f > ( const float *  from)

Definition at line 431 of file AltiVec/PacketMath.h.

{
  // Use the aligned load when the pointer is 16-byte aligned, otherwise
  // fall back to the unaligned path.
  Packet4f p;
  if((ptrdiff_t(from) % 16) == 0)  p = pload<Packet4f>(from);
  else                             p = ploadu<Packet4f>(from);
  // Duplicate the low two lanes: {from[0],from[0],from[1],from[1]}.
  return vec_perm(p, p, p16uc_DUPLICATE32_HI);
}
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::ploaddup< Packet4i > ( const int *  from)

Definition at line 438 of file AltiVec/PacketMath.h.

{
  // Use the aligned load when the pointer is 16-byte aligned, otherwise
  // fall back to the unaligned path.
  Packet4i p;
  if((ptrdiff_t(from) % 16) == 0)  p = pload<Packet4i>(from);
  else                             p = ploadu<Packet4i>(from);
  // Duplicate the low two lanes: {from[0],from[0],from[1],from[1]}.
  return vec_perm(p, p, p16uc_DUPLICATE32_HI);
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::ploaddup< Packet8f > ( const float *  from)

Definition at line 216 of file AVX/PacketMath.h.

{
  // TODO try to find a way to avoid the need of a temporary register
//   Packet8f tmp  = _mm256_castps128_ps256(_mm_loadu_ps(from));
//   tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
//   return _mm256_unpacklo_ps(tmp,tmp);
  
  // _mm256_insertf128_ps is very slow on Haswell, thus:
  // broadcast the four source floats into both 128-bit halves, ...
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128bits using a blend
  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
  // then we can perform a consistent permutation on the global register to get everything in shape:
  // yields {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]}.
  return  _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::ploadquad ( const typename unpacket_traits< Packet >::type from) [inline]
Returns:
a packet with elements of *from quadrupled. For instance, for a packet of 8 elements, 2 scalars will be read from *from and replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]} Currently, this function is only used in matrix products. For packet-size smaller or equal to 4, this function is equivalent to pload1

Definition at line 237 of file GenericPacketMath.h.

{ return pload1<Packet>(from); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::ploadquad< Packet8f > ( const float *  from)

Definition at line 238 of file AVX/PacketMath.h.

{
  // Broadcast from[0] into the low half and from[1] into the high half:
  // {from[0] x4, from[1] x4}.
  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
}
template<typename Packet , int Alignment>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet Eigen::internal::ploadt ( const typename unpacket_traits< Packet >::type from)
Returns:
a packet version of *from. The pointer from must be aligned on a Alignment bytes boundary.

Definition at line 482 of file GenericPacketMath.h.

{
  // Select the aligned load when the compile-time Alignment guarantee
  // meets the packet's requirement; otherwise use the unaligned load.
  return (Alignment >= unpacket_traits<Packet>::alignment)
             ? pload<Packet>(from)
             : ploadu<Packet>(from);
}
template<typename Packet , int LoadMode>
Packet Eigen::internal::ploadt_ro ( const typename unpacket_traits< Packet >::type from) [inline]
Returns:
a packet version of *from. Unlike ploadt, ploadt_ro takes advantage of the read-only memory path on the hardware if available to speedup the loading of data that won't be modified by the current computation.

Definition at line 507 of file GenericPacketMath.h.

{
  // Generic fallback: no read-only fast path here, just forward to ploadt.
  return ploadt<Packet, LoadMode>(from);
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::ploadu ( const typename unpacket_traits< Packet >::type from) [inline]
Returns:
a packet version of *from, (un-aligned load)

Definition at line 212 of file GenericPacketMath.h.

{ return *from; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::ploadu< Packet1cd > ( const std::complex< double > *  from)

Definition at line 332 of file SSE/Complex.h.

{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::ploadu< Packet2cd > ( const std::complex< double > *  from)

Definition at line 298 of file AVX/Complex.h.

{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cd(ploadu<Packet4d>((const double*)from)); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploadu< Packet2cf > ( const std::complex< float > *  from)

Definition at line 116 of file AltiVec/Complex.h.

{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::ploadu< Packet2d > ( const double *  from)

Definition at line 328 of file SSE/PacketMath.h.

{
  EIGEN_DEBUG_UNALIGNED_LOAD
  // _mm_loadu_pd has no alignment requirement.
  return _mm_loadu_pd(from);
}
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::ploadu< Packet4cf > ( const std::complex< float > *  from)

Definition at line 76 of file AVX/Complex.h.

{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cf(ploadu<Packet8f>(&numext::real_ref(*from))); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::ploadu< Packet4d > ( const double *  from)

Definition at line 212 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::ploadu< Packet4f > ( const float *  from)

Definition at line 424 of file AltiVec/PacketMath.h.

{
  // ploadu is the *unaligned* load: tag it with the unaligned-load debug
  // counter, consistent with every other ploadu specialization.
  EIGEN_DEBUG_UNALIGNED_LOAD
  // vec_vsx_ld tolerates misalignment: split the address into a 16-byte
  // aligned base plus a 0-15 byte offset.
  return (Packet4f) vec_vsx_ld((long)from & 15, (const float*) _EIGEN_ALIGNED_PTR(from));
}
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::ploadu< Packet4i > ( const int *  from)

Definition at line 419 of file AltiVec/PacketMath.h.

{
  // ploadu is the *unaligned* load: tag it with the unaligned-load debug
  // counter, consistent with every other ploadu specialization.
  EIGEN_DEBUG_UNALIGNED_LOAD
  // vec_vsx_ld tolerates misalignment: split the address into a 16-byte
  // aligned base plus a 0-15 byte offset.
  return (Packet4i) vec_vsx_ld((long)from & 15, (const int*) _EIGEN_ALIGNED_PTR(from));
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::ploadu< Packet8f > ( const float *  from)

Definition at line 211 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
template<>
EIGEN_STRONG_INLINE Packet8i Eigen::internal::ploadu< Packet8i > ( const int *  from)

Definition at line 213 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::plog ( const Packet &  a)
Returns:
the log of a (coeff-wise)

Definition at line 416 of file GenericPacketMath.h.

{ using std::log; return log(a); }
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::plog10 ( const Packet &  a)
Returns:
the log10 of a (coeff-wise)

Definition at line 420 of file GenericPacketMath.h.

{ using std::log10; return log10(a); }

Definition at line 23 of file arch/AltiVec/MathFunctions.h.

{
  // Vectorized natural logarithm for 4 packed floats (AltiVec),
  // Cephes-style: log(x) = log(mantissa) + exponent*ln(2).
  Packet4f x = _x;
  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);
  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);
  _EIGEN_DECLARE_CONST_Packet4i(23, 23);

  // Mask that clears the IEEE-754 exponent field, keeping sign + mantissa.
  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);

  /* the smallest non denormalized float number */
  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos,  0x00800000);
  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf,     0xff800000); // -1.f/0.f
  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan,     0xffffffff);
  
  /* natural logarithm computed for 4 simultaneous floats;
    returns NaN for x < 0 and -inf for x == 0
  */
  _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);


  Packet4i emm0;

  /* isvalid_mask is 0 if x < 0 or x is NaN. */
  Packet4ui isvalid_mask = reinterpret_cast<Packet4ui>(vec_cmpge(x, p4f_ZERO));
  Packet4ui iszero_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(x, p4f_ZERO));

  x = pmax(x, p4f_min_norm_pos);  /* cut off denormalized stuff */
  // Extract the biased exponent by shifting the float bits right by 23.
  emm0 = vec_sr(reinterpret_cast<Packet4i>(x),
                reinterpret_cast<Packet4ui>(p4i_23));

  /* keep only the fractional part */
  // Clear the exponent field and set it to 0.5's, mapping x into [0.5, 1).
  x = pand(x, p4f_inv_mant_mask);
  x = por(x, p4f_half);

  // e = unbiased exponent + 1 (the +1 is compensated in part2 below).
  emm0 = psub(emm0, p4i_0x7f);
  Packet4f e = padd(vec_ctf(emm0, 0), p4f_1);

  /* part2:
     if( x < SQRTHF ) {
       e -= 1;
       x = x + x - 1.0;
     } else { x = x - 1.0; }
  */
  Packet4f mask = reinterpret_cast<Packet4f>(vec_cmplt(x, p4f_cephes_SQRTHF));
  Packet4f tmp = pand(x, mask);
  x = psub(x, p4f_1);
  e = psub(e, pand(p4f_1, mask));
  x = padd(x, tmp);

  Packet4f x2 = pmul(x,x);
  Packet4f x3 = pmul(x2,x);

  // Degree-8 polynomial evaluated in three interleaved strands,
  // presumably for instruction-level parallelism.
  Packet4f y, y1, y2;
  y  = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
  y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
  y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
  y  = pmadd(y , x, p4f_cephes_log_p2);
  y1 = pmadd(y1, x, p4f_cephes_log_p5);
  y2 = pmadd(y2, x, p4f_cephes_log_p8);
  y = pmadd(y, x3, y1);
  y = pmadd(y, x3, y2);
  y = pmul(y, x3);

  // Add e*ln(2) back, split as q1 + q2 for precision.
  y1 = pmul(e, p4f_cephes_log_q1);
  tmp = pmul(x2, p4f_half);
  y = padd(y, y1);
  x = psub(x, tmp);
  y2 = pmul(e, p4f_cephes_log_q2);
  x = padd(x, y);
  x = padd(x, y2);
  // negative arg will be NAN, 0 will be -INF
  x = vec_sel(x, p4f_minus_inf, iszero_mask);
  x = vec_sel(p4f_minus_nan, x, isvalid_mask);
  return x;
}

Definition at line 121 of file arch/AVX/MathFunctions.h.

                                   {
  // Vectorized natural logarithm for 8 packed floats (AVX),
  // Cephes-style: log(x) = log(mantissa) + exponent*ln(2).
  Packet8f x = _x;
  _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);
  _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);
  _EIGEN_DECLARE_CONST_Packet8f(126f, 126.0f);

  // Mask that clears the IEEE-754 exponent field, keeping sign + mantissa.
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inv_mant_mask, ~0x7f800000);

  // The smallest non denormalized float number.
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(min_norm_pos, 0x00800000);
  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(minus_inf, 0xff800000);

  // Polynomial coefficients.
  _EIGEN_DECLARE_CONST_Packet8f(cephes_SQRTHF, 0.707106781186547524f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p0, 7.0376836292E-2f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p1, -1.1514610310E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p2, 1.1676998740E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p3, -1.2420140846E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p4, +1.4249322787E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p5, -1.6668057665E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p6, +2.0000714765E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p7, -2.4999993993E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p8, +3.3333331174E-1f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q1, -2.12194440e-4f);
  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q2, 0.693359375f);

  Packet8f invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_NGE_UQ); // not greater equal is true if x is NaN
  Packet8f iszero_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_EQ_OQ);

  // Truncate input values to the minimum positive normal.
  x = pmax(x, p8f_min_norm_pos);

  // Shift out the mantissa; subtracting 126 (not 127) yields the unbiased
  // exponent + 1, the +1 being compensated in part2 below.
  Packet8f emm0 = pshiftright(x,23);
  Packet8f e = _mm256_sub_ps(emm0, p8f_126f);

  // Set the exponents to -1, i.e. x are in the range [0.5,1).
  x = _mm256_and_ps(x, p8f_inv_mant_mask);
  x = _mm256_or_ps(x, p8f_half);

  // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
  // and shift by -1. The values are then centered around 0, which improves
  // the stability of the polynomial evaluation.
  //   if( x < SQRTHF ) {
  //     e -= 1;
  //     x = x + x - 1.0;
  //   } else { x = x - 1.0; }
  Packet8f mask = _mm256_cmp_ps(x, p8f_cephes_SQRTHF, _CMP_LT_OQ);
  Packet8f tmp = _mm256_and_ps(x, mask);
  x = psub(x, p8f_1);
  e = psub(e, _mm256_and_ps(p8f_1, mask));
  x = padd(x, tmp);

  Packet8f x2 = pmul(x, x);
  Packet8f x3 = pmul(x2, x);

  // Evaluate the polynomial approximant of degree 8 in three parts, probably
  // to improve instruction-level parallelism.
  Packet8f y, y1, y2;
  y = pmadd(p8f_cephes_log_p0, x, p8f_cephes_log_p1);
  y1 = pmadd(p8f_cephes_log_p3, x, p8f_cephes_log_p4);
  y2 = pmadd(p8f_cephes_log_p6, x, p8f_cephes_log_p7);
  y = pmadd(y, x, p8f_cephes_log_p2);
  y1 = pmadd(y1, x, p8f_cephes_log_p5);
  y2 = pmadd(y2, x, p8f_cephes_log_p8);
  y = pmadd(y, x3, y1);
  y = pmadd(y, x3, y2);
  y = pmul(y, x3);

  // Add the logarithm of the exponent back to the result of the interpolation.
  // e*ln(2) is split as q1 + q2 for precision.
  y1 = pmul(e, p8f_cephes_log_q1);
  tmp = pmul(x2, p8f_half);
  y = padd(y, y1);
  x = psub(x, tmp);
  y2 = pmul(e, p8f_cephes_log_q2);
  x = padd(x, y);
  x = padd(x, y2);

  // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.
  return _mm256_or_ps(
      _mm256_andnot_ps(iszero_mask, _mm256_or_ps(x, invalid_mask)),
      _mm256_and_ps(iszero_mask, p8f_minus_inf));
}
template<typename Packet >
Packet Eigen::internal::plset ( const typename unpacket_traits< Packet >::type a) [inline]

Returns a packet with coefficients (a,a+1,...,a+packet_size-1).

Definition at line 276 of file GenericPacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::plset< Packet2d > ( const double &  a)

Definition at line 190 of file SSE/PacketMath.h.

{ return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::plset< Packet4d > ( const double &  a)

Definition at line 122 of file AVX/PacketMath.h.

{ return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::plset< Packet4f > ( const float &  a)

Definition at line 297 of file AltiVec/PacketMath.h.

{ return vec_add(pset1<Packet4f>(a), p4f_COUNTDOWN); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::plset< Packet4i > ( const int &  a)

Definition at line 298 of file AltiVec/PacketMath.h.

{ return vec_add(pset1<Packet4i>(a), p4i_COUNTDOWN); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::plset< Packet8f > ( const float &  a)

Definition at line 121 of file AVX/PacketMath.h.

{ return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pmadd ( const Packet4f &  a,
const Packet4f &  b,
const Packet4f &  c 
)

Definition at line 373 of file AltiVec/PacketMath.h.

{ return vec_madd(a, b, c); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pmadd ( const Packet4i &  a,
const Packet4i &  b,
const Packet4i &  c 
)

Definition at line 374 of file AltiVec/PacketMath.h.

{ return padd(pmul(a,b), c); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pmadd ( const Packet &  a,
const Packet &  b,
const Packet &  c 
) [inline]
Returns:
a * b + c (coeff-wise)

Definition at line 474 of file GenericPacketMath.h.

{ return padd(pmul(a, b),c); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pmax ( const Packet &  a,
const Packet &  b 
) [inline]
Returns:
the max of a and b (coeff-wise)

Definition at line 179 of file GenericPacketMath.h.

                         { return numext::maxi(a, b); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pmax< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 262 of file SSE/PacketMath.h.

{ return _mm_max_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pmax< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 184 of file AVX/PacketMath.h.

{ return _mm256_max_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pmax< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 379 of file AltiVec/PacketMath.h.

{ return vec_max(a, b); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pmax< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 380 of file AltiVec/PacketMath.h.

{ return vec_max(a, b); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pmax< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 183 of file AVX/PacketMath.h.

{ return _mm256_max_ps(a,b); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pmin ( const Packet &  a,
const Packet &  b 
) [inline]
Returns:
the min of a and b (coeff-wise)

Definition at line 174 of file GenericPacketMath.h.

                         { return numext::mini(a, b); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pmin< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 249 of file SSE/PacketMath.h.

{ return _mm_min_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pmin< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 181 of file AVX/PacketMath.h.

{ return _mm256_min_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pmin< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 376 of file AltiVec/PacketMath.h.

{ return vec_min(a, b); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pmin< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 377 of file AltiVec/PacketMath.h.

{ return vec_min(a, b); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pmin< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 180 of file AVX/PacketMath.h.

{ return _mm256_min_ps(a,b); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pmul ( const Packet &  a,
const Packet &  b 
) [inline]
Returns:
a * b (coeff-wise)

Definition at line 164 of file GenericPacketMath.h.

                         { return a*b; }
template<>
std::complex<float> Eigen::internal::pmul ( const std::complex< float > &  a,
const std::complex< float > &  b 
) [inline]

Definition at line 548 of file GenericPacketMath.h.

{ return std::complex<float>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
template<>
std::complex<double> Eigen::internal::pmul ( const std::complex< double > &  a,
const std::complex< double > &  b 
) [inline]

Definition at line 551 of file GenericPacketMath.h.

{ return std::complex<double>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pmul< Packet1cd > ( const Packet1cd &  a,
const Packet1cd &  b 
)

Definition at line 310 of file SSE/Complex.h.

{
  #ifdef EIGEN_VECTORIZE_SSE3
  return Packet1cd(_mm_addsub_pd(_mm_mul_pd(_mm_movedup_pd(a.v), b.v),
                                 _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
                                            vec2d_swizzle1(b.v, 1, 0))));
  #else
  const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
  return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
                              _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
                                                    vec2d_swizzle1(b.v, 1, 0)), mask)));
  #endif
}
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pmul< Packet2cd > ( const Packet2cd &  a,
const Packet2cd &  b 
)

Definition at line 281 of file AVX/Complex.h.

{
  __m256d tmp1 = _mm256_shuffle_pd(a.v,a.v,0x0);
  __m256d even = _mm256_mul_pd(tmp1, b.v);
  __m256d tmp2 = _mm256_shuffle_pd(a.v,a.v,0xF);
  __m256d tmp3 = _mm256_shuffle_pd(b.v,b.v,0x5);
  __m256d odd  = _mm256_mul_pd(tmp2, tmp3);
  return Packet2cd(_mm256_addsub_pd(even, odd));
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pmul< Packet2cf > ( const Packet2cf &  a,
const Packet2cf &  b 
)

Definition at line 91 of file AltiVec/Complex.h.

{
  Packet4f v1, v2;

  // Permute and multiply the real parts of a and b
  v1 = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
  // Get the imaginary parts of a
  v2 = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
  // multiply a_re * b 
  v1 = vec_madd(v1, b.v, p4f_ZERO);
  // multiply a_im * b and get the conjugate result
  v2 = vec_madd(v2, b.v, p4f_ZERO);
  v2 = (Packet4f) vec_xor((Packet4ui)v2, p4ui_CONJ_XOR);
  // permute back to a proper order
  v2 = vec_perm(v2, v2, p16uc_COMPLEX32_REV);
  
  return Packet2cf(vec_add(v1, v2));
}
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pmul< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 221 of file SSE/PacketMath.h.

{ return _mm_mul_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pmul< Packet4cf > ( const Packet4cf &  a,
const Packet4cf &  b 
)

Definition at line 62 of file AVX/Complex.h.

{
  __m256 tmp1 = _mm256_mul_ps(_mm256_moveldup_ps(a.v), b.v);
  __m256 tmp2 = _mm256_mul_ps(_mm256_movehdup_ps(a.v), _mm256_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1)));
  __m256 result = _mm256_addsub_ps(tmp1, tmp2);
  return Packet4cf(result);
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pmul< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 144 of file AVX/PacketMath.h.

{ return _mm256_mul_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pmul< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 312 of file AltiVec/PacketMath.h.

{ return vec_madd(a,b,p4f_ZERO); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pmul< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 147 of file NEON/PacketMath.h.

{ return vmulq_s32(a,b); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pmul< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 143 of file AVX/PacketMath.h.

{ return _mm256_mul_ps(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pnegate ( const Packet4cf &  a)

Definition at line 52 of file AVX/Complex.h.

{
  return Packet4cf(pnegate(a.v));
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pnegate ( const Packet2cf &  a)

Definition at line 88 of file AltiVec/Complex.h.

{ return Packet2cf(pnegate(a.v)); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pnegate ( const Packet8f &  a)

Definition at line 130 of file AVX/PacketMath.h.

{
  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pnegate ( const Packet4d &  a)

Definition at line 134 of file AVX/PacketMath.h.

{
  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pnegate ( const Packet &  a) [inline]
Returns:
-a (coeff-wise)

Definition at line 155 of file GenericPacketMath.h.

{ return -a; }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pnegate ( const Packet2d &  a)

Definition at line 206 of file SSE/PacketMath.h.

{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pnegate ( const Packet2cd &  a)

Definition at line 274 of file AVX/Complex.h.

{ return Packet2cd(pnegate(a.v)); }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pnegate ( const Packet1cd &  a)

Definition at line 303 of file SSE/Complex.h.

{ return Packet1cd(pnegate(Packet2d(a.v))); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pnegate ( const Packet4f &  a)

Definition at line 306 of file AltiVec/PacketMath.h.

{ return psub<Packet4f>(p4f_ZERO, a); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pnegate ( const Packet4i &  a)

Definition at line 307 of file AltiVec/PacketMath.h.

{ return psub<Packet4i>(p4i_ZERO, a); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::por ( const Packet &  a,
const Packet &  b 
) [inline]
Returns:
the bitwise or of a and b

Definition at line 196 of file GenericPacketMath.h.

{ return a | b; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::por< Packet1cd > ( const Packet1cd &  a,
const Packet1cd &  b 
)

Definition at line 325 of file SSE/Complex.h.

{ return Packet1cd(_mm_or_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::por< Packet2cd > ( const Packet2cd &  a,
const Packet2cd &  b 
)

Definition at line 292 of file AVX/Complex.h.

{ return Packet2cd(_mm256_or_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::por< Packet2cf > ( const Packet2cf &  a,
const Packet2cf &  b 
)

Definition at line 111 of file AltiVec/Complex.h.

{ return Packet2cf(vec_or(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::por< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 290 of file SSE/PacketMath.h.

{ return _mm_or_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::por< Packet4cf > ( const Packet4cf &  a,
const Packet4cf &  b 
)

Definition at line 71 of file AVX/Complex.h.

{ return Packet4cf(_mm256_or_ps(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::por< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 199 of file AVX/PacketMath.h.

{ return _mm256_or_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::por< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 385 of file AltiVec/PacketMath.h.

{ return vec_or(a, b); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::por< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 386 of file AltiVec/PacketMath.h.

{ return vec_or(a, b); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::por< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 198 of file AVX/PacketMath.h.

{ return _mm256_or_ps(a,b); }
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits<Packet>::type Eigen::internal::predux ( const Packet &  a) [inline]
Returns:
the sum of the elements of a

Definition at line 317 of file GenericPacketMath.h.

{ return a; }
template<typename Packet >
EIGEN_DEVICE_FUNC conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_traits<Packet>::half,Packet>::type Eigen::internal::predux4 ( const Packet &  a) [inline]
Returns:
the sum of the elements of a by block of 4 elements. For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7} For packet-size smaller or equal to 4, this boils down to a noop.

Definition at line 326 of file GenericPacketMath.h.

{ return a; }
template<typename Packet >
const DoublePacket<Packet>& Eigen::internal::predux4 ( const DoublePacket< Packet > &  a)

Definition at line 598 of file GeneralBlockPanelKernel.h.

{
  return a;
}
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::predux4< Packet8f > ( const Packet8f &  a)

Definition at line 400 of file AVX/PacketMath.h.

{
  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}
template<>
EIGEN_STRONG_INLINE std::complex<double> Eigen::internal::predux< Packet1cd > ( const Packet1cd &  a)

Definition at line 354 of file SSE/Complex.h.

{
  return pfirst(a);
}
template<>
EIGEN_STRONG_INLINE std::complex<double> Eigen::internal::predux< Packet2cd > ( const Packet2cd &  a)

Definition at line 340 of file AVX/Complex.h.

{
  return predux(padd(Packet1cd(_mm256_extractf128_pd(a.v,0)),
                     Packet1cd(_mm256_extractf128_pd(a.v,1))));
}
template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux< Packet2cf > ( const Packet2cf &  a)

Definition at line 143 of file AltiVec/Complex.h.

{
  Packet4f b;
  b = (Packet4f) vec_sld(a.v, a.v, 8);
  b = padd(a.v, b);
  return pfirst(Packet2cf(b));
}
template<>
EIGEN_STRONG_INLINE double Eigen::internal::predux< Packet2d > ( const Packet2d &  a)

Definition at line 545 of file SSE/PacketMath.h.

{
  return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
}
template<>
EIGEN_STRONG_INLINE std::complex<float> Eigen::internal::predux< Packet4cf > ( const Packet4cf &  a)

Definition at line 137 of file AVX/Complex.h.

{
  return predux(padd(Packet2cf(_mm256_extractf128_ps(a.v,0)),
                     Packet2cf(_mm256_extractf128_ps(a.v,1))));
}
template<>
EIGEN_STRONG_INLINE double Eigen::internal::predux< Packet4d > ( const Packet4d &  a)

Definition at line 394 of file AVX/PacketMath.h.

{
  Packet4d tmp0 = _mm256_hadd_pd(a,_mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_hadd_pd(tmp0,tmp0));
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::predux< Packet4f > ( const Packet4f &  a)

Definition at line 511 of file AltiVec/PacketMath.h.

{
  Packet4f b, sum;
  b   = (Packet4f) vec_sld(a, a, 8);
  sum = vec_add(a, b);
  b   = (Packet4f) vec_sld(sum, sum, 4);
  sum = vec_add(sum, b);
  return pfirst(sum);
}
template<>
EIGEN_STRONG_INLINE int Eigen::internal::predux< Packet4i > ( const Packet4i &  a)

Definition at line 549 of file AltiVec/PacketMath.h.

{
  Packet4i sum;
  sum = vec_sums(a, p4i_ZERO);
#ifdef _BIG_ENDIAN
  sum = vec_sld(sum, p4i_ZERO, 12);
#else
  sum = vec_sld(p4i_ZERO, sum, 4);
#endif
  return pfirst(sum);
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::predux< Packet8f > ( const Packet8f &  a)

Definition at line 388 of file AVX/PacketMath.h.

{
  Packet8f tmp0 = _mm256_hadd_ps(a,_mm256_permute2f128_ps(a,a,1));
  tmp0 = _mm256_hadd_ps(tmp0,tmp0);
  return pfirst(_mm256_hadd_ps(tmp0, tmp0));
}
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits<Packet>::type Eigen::internal::predux_max ( const Packet &  a) [inline]
Returns:
the max of the elements of a

Definition at line 338 of file GenericPacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE double Eigen::internal::predux_max< Packet2d > ( const Packet2d &  a)

Definition at line 656 of file SSE/PacketMath.h.

{
  return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<>
EIGEN_STRONG_INLINE double Eigen::internal::predux_max< Packet4d > ( const Packet4d &  a)

Definition at line 438 of file AVX/PacketMath.h.

{
  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::predux_max< Packet4f > ( const Packet4f &  a)

Definition at line 623 of file AltiVec/PacketMath.h.

{
  Packet4f b, res;
  b = vec_max(a, vec_sld(a, a, 8));
  res = vec_max(b, vec_sld(b, b, 4));
  return pfirst(res);
}
template<>
EIGEN_STRONG_INLINE int Eigen::internal::predux_max< Packet4i > ( const Packet4i &  a)

Definition at line 631 of file AltiVec/PacketMath.h.

{
  Packet4i b, res;
  b = vec_max(a, vec_sld(a, a, 8));
  res = vec_max(b, vec_sld(b, b, 4));
  return pfirst(res);
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::predux_max< Packet8f > ( const Packet8f &  a)

Definition at line 431 of file AVX/PacketMath.h.

{
  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits<Packet>::type Eigen::internal::predux_min ( const Packet &  a) [inline]
Returns:
the min of the elements of a

Definition at line 334 of file GenericPacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE double Eigen::internal::predux_min< Packet2d > ( const Packet2d &  a)

Definition at line 630 of file SSE/PacketMath.h.

{
  return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<>
EIGEN_STRONG_INLINE double Eigen::internal::predux_min< Packet4d > ( const Packet4d &  a)

Definition at line 425 of file AVX/PacketMath.h.

{
  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::predux_min< Packet4f > ( const Packet4f &  a)

Definition at line 606 of file AltiVec/PacketMath.h.

{
  Packet4f b, res;
  b = vec_min(a, vec_sld(a, a, 8));
  res = vec_min(b, vec_sld(b, b, 4));
  return pfirst(res);
}
template<>
EIGEN_STRONG_INLINE int Eigen::internal::predux_min< Packet4i > ( const Packet4i &  a)

Definition at line 614 of file AltiVec/PacketMath.h.

{
  Packet4i b, res;
  b = vec_min(a, vec_sld(a, a, 8));
  res = vec_min(b, vec_sld(b, b, 4));
  return pfirst(res);
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::predux_min< Packet8f > ( const Packet8f &  a)

Definition at line 419 of file AVX/PacketMath.h.

{
  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits<Packet>::type Eigen::internal::predux_mul ( const Packet &  a) [inline]
Returns:
the product of the elements of a

Definition at line 330 of file GenericPacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE std::complex<double> Eigen::internal::predux_mul< Packet1cd > ( const Packet1cd &  a)

Definition at line 364 of file SSE/Complex.h.

{
  return pfirst(a);
}
template<>
EIGEN_STRONG_INLINE std::complex<double> Eigen::internal::predux_mul< Packet2cd > ( const Packet2cd &  a)

Definition at line 354 of file AVX/Complex.h.

{
  return predux(pmul(Packet1cd(_mm256_extractf128_pd(a.v,0)),
                     Packet1cd(_mm256_extractf128_pd(a.v,1))));
}
template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux_mul< Packet2cf > ( const Packet2cf &  a)

Definition at line 167 of file AltiVec/Complex.h.

{
  Packet4f b;
  Packet2cf prod;
  b = (Packet4f) vec_sld(a.v, a.v, 8);
  prod = pmul(a, Packet2cf(b));

  return pfirst(prod);
}
template<>
EIGEN_STRONG_INLINE double Eigen::internal::predux_mul< Packet2d > ( const Packet2d &  a)

Definition at line 610 of file SSE/PacketMath.h.

{
  return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<>
EIGEN_STRONG_INLINE std::complex<float> Eigen::internal::predux_mul< Packet4cf > ( const Packet4cf &  a)

Definition at line 158 of file AVX/Complex.h.

{
  return predux_mul(pmul(Packet2cf(_mm256_extractf128_ps(a.v, 0)),
                         Packet2cf(_mm256_extractf128_ps(a.v, 1))));
}
template<>
EIGEN_STRONG_INLINE double Eigen::internal::predux_mul< Packet4d > ( const Packet4d &  a)

Definition at line 412 of file AVX/PacketMath.h.

{
  Packet4d tmp;
  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::predux_mul< Packet4f > ( const Packet4f &  a)

Definition at line 591 of file AltiVec/PacketMath.h.

{
  Packet4f prod;
  prod = pmul(a, (Packet4f)vec_sld(a, a, 8));
  return pfirst(pmul(prod, (Packet4f)vec_sld(prod, prod, 4)));
}
template<>
EIGEN_STRONG_INLINE int Eigen::internal::predux_mul< Packet4i > ( const Packet4i &  a)

Definition at line 598 of file AltiVec/PacketMath.h.

{
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return aux[0] * aux[1] * aux[2] * aux[3];
}
template<>
EIGEN_STRONG_INLINE float Eigen::internal::predux_mul< Packet8f > ( const Packet8f &  a)

Definition at line 405 of file AVX/PacketMath.h.

{
  Packet8f tmp;
  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::preduxp ( const Packet *  vecs) [inline]
Returns:
a packet where the element i contains the sum of the packet of vec[i]

Definition at line 314 of file GenericPacketMath.h.

{ return vecs[0]; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::preduxp< Packet1cd > ( const Packet1cd *  vecs)

Definition at line 359 of file SSE/Complex.h.

{
  return vecs[0];
}
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::preduxp< Packet2cd > ( const Packet2cd *  vecs)

Definition at line 346 of file AVX/Complex.h.

{
  Packet4d t0 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 0 + (2<<4));
  Packet4d t1 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 1 + (3<<4));

  return Packet2cd(_mm256_add_pd(t0,t1));
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::preduxp< Packet2cf > ( const Packet2cf *  vecs)

Definition at line 151 of file AltiVec/Complex.h.

{
  Packet4f b1, b2;
#ifdef _BIG_ENDIAN  
  b1 = (Packet4f) vec_sld(vecs[0].v, vecs[1].v, 8);
  b2 = (Packet4f) vec_sld(vecs[1].v, vecs[0].v, 8);
#else
  b1 = (Packet4f) vec_sld(vecs[1].v, vecs[0].v, 8);
  b2 = (Packet4f) vec_sld(vecs[0].v, vecs[1].v, 8);
#endif
  b2 = (Packet4f) vec_sld(b2, b2, 8);
  b2 = padd(b1, b2);

  return Packet2cf(b2);
}
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::preduxp< Packet2d > ( const Packet2d *  vecs)

Definition at line 564 of file SSE/PacketMath.h.

{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::preduxp< Packet4cf > ( const Packet4cf *  vecs)

Definition at line 143 of file AVX/Complex.h.

{
  Packet8f t0 = _mm256_shuffle_ps(vecs[0].v, vecs[0].v, _MM_SHUFFLE(3, 1, 2 ,0));
  Packet8f t1 = _mm256_shuffle_ps(vecs[1].v, vecs[1].v, _MM_SHUFFLE(3, 1, 2 ,0));
  t0 = _mm256_hadd_ps(t0,t1);
  Packet8f t2 = _mm256_shuffle_ps(vecs[2].v, vecs[2].v, _MM_SHUFFLE(3, 1, 2 ,0));
  Packet8f t3 = _mm256_shuffle_ps(vecs[3].v, vecs[3].v, _MM_SHUFFLE(3, 1, 2 ,0));
  t2 = _mm256_hadd_ps(t2,t3);
  
  t1 = _mm256_permute2f128_ps(t0,t2, 0 + (2<<4));
  t3 = _mm256_permute2f128_ps(t0,t2, 1 + (3<<4));

  return Packet4cf(_mm256_add_ps(t1,t3));
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::preduxp< Packet4d > ( const Packet4d *  vecs)

Definition at line 375 of file AVX/PacketMath.h.

{
 Packet4d tmp0, tmp1;

  tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  return _mm256_blend_pd(tmp0, tmp1, 0xC);
}
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::preduxp< Packet4f > ( const Packet4f *  vecs)

Definition at line 521 of file AltiVec/PacketMath.h.

{
  Packet4f v[4], sum[4];

  // It's easier and faster to transpose then add as columns
  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
  // Do the transpose, first set of moves
  v[0] = vec_mergeh(vecs[0], vecs[2]);
  v[1] = vec_mergel(vecs[0], vecs[2]);
  v[2] = vec_mergeh(vecs[1], vecs[3]);
  v[3] = vec_mergel(vecs[1], vecs[3]);
  // Get the resulting vectors
  sum[0] = vec_mergeh(v[0], v[2]);
  sum[1] = vec_mergel(v[0], v[2]);
  sum[2] = vec_mergeh(v[1], v[3]);
  sum[3] = vec_mergel(v[1], v[3]);

  // Now do the summation:
  // Lines 0+1
  sum[0] = vec_add(sum[0], sum[1]);
  // Lines 2+3
  sum[1] = vec_add(sum[2], sum[3]);
  // Add the results
  sum[0] = vec_add(sum[0], sum[1]);

  return sum[0];
}
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::preduxp< Packet4i > ( const Packet4i *  vecs)

Definition at line 561 of file AltiVec/PacketMath.h.

{
  Packet4i v[4], sum[4];

  // It's easier and faster to transpose then add as columns
  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
  // Do the transpose, first set of moves
  v[0] = vec_mergeh(vecs[0], vecs[2]);
  v[1] = vec_mergel(vecs[0], vecs[2]);
  v[2] = vec_mergeh(vecs[1], vecs[3]);
  v[3] = vec_mergel(vecs[1], vecs[3]);
  // Get the resulting vectors
  sum[0] = vec_mergeh(v[0], v[2]);
  sum[1] = vec_mergel(v[0], v[2]);
  sum[2] = vec_mergeh(v[1], v[3]);
  sum[3] = vec_mergel(v[1], v[3]);

  // Now do the summation:
  // Lines 0+1
  sum[0] = vec_add(sum[0], sum[1]);
  // Lines 2+3
  sum[1] = vec_add(sum[2], sum[3]);
  // Add the results
  sum[0] = vec_add(sum[0], sum[1]);

  return sum[0];
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::preduxp< Packet8f > ( const Packet8f *  vecs)

Definition at line 347 of file AVX/PacketMath.h.

{
    __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
    __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
    __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
    __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);

    __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
    __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
    __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
    __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);

    __m256 perm1 =  _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
    __m256 perm2 =  _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
    __m256 perm3 =  _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
    __m256 perm4 =  _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

    __m256 sum1 = _mm256_add_ps(perm1, hsum5);
    __m256 sum2 = _mm256_add_ps(perm2, hsum6);
    __m256 sum3 = _mm256_add_ps(perm3, hsum7);
    __m256 sum4 = _mm256_add_ps(perm4, hsum8);

    __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
    __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

    __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
    return final;
}
template<typename Scalar >
EIGEN_DEVICE_FUNC void Eigen::internal::prefetch ( const Scalar *  addr) [inline]

tries to do cache prefetching of addr

Definition at line 293 of file GenericPacketMath.h.

{
#ifdef __CUDA_ARCH__
#if defined(__LP64__)
  // 64-bit pointer operand constraint for inlined asm
  asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
#else
  // 32-bit pointer operand constraint for inlined asm
  asm(" prefetch.L1 [ %1 ];" : "=r"(addr) : "r"(addr));
#endif
#elif !EIGEN_COMP_MSVC
  __builtin_prefetch(addr);
#endif
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< double > ( const double *  addr)

Definition at line 305 of file AVX/PacketMath.h.

{ _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< float > ( const float *  addr)

Definition at line 498 of file AltiVec/PacketMath.h.

{ vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< int > ( const int *  addr)

Definition at line 499 of file AltiVec/PacketMath.h.

{ vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< std::complex< double > > ( const std::complex< double > *  addr)

Definition at line 343 of file SSE/Complex.h.

{ _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< std::complex< float > > ( const std::complex< float > *  addr)

Definition at line 126 of file AltiVec/Complex.h.

{ vec_dstt((float *)addr, DST_CTRL(2,2,32), DST_CHAN); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::preverse ( const Packet4cf &  a)

Definition at line 124 of file AVX/Complex.h.

                                                                      {
  __m128 low  = _mm256_extractf128_ps(a.v, 0);
  __m128 high = _mm256_extractf128_ps(a.v, 1);
  __m128d lowd  = _mm_castps_pd(low);
  __m128d highd = _mm_castps_pd(high);
  low  = _mm_castpd_ps(_mm_shuffle_pd(lowd,lowd,0x1));
  high = _mm_castpd_ps(_mm_shuffle_pd(highd,highd,0x1));
  __m256 result = _mm256_setzero_ps();
  result = _mm256_insertf128_ps(result, low, 1);
  result = _mm256_insertf128_ps(result, high, 0);
  return Packet4cf(result);
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::preverse ( const Packet2cf &  a)

Definition at line 136 of file AltiVec/Complex.h.

{
  Packet4f rev_a;
  rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2);
  return Packet2cf(rev_a);
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::preverse ( const Packet8f &  a)

Definition at line 319 of file AVX/PacketMath.h.

{
  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
  return _mm256_permute2f128_ps(tmp, tmp, 1);
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::preverse ( const Packet4d &  a)

Definition at line 324 of file AVX/PacketMath.h.

{
   __m256d tmp = _mm256_shuffle_pd(a,a,5);
  return _mm256_permute2f128_pd(tmp, tmp, 1);

  __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
    return _mm256_permute_pd(swap_halves,5);
}
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::preverse ( const Packet2cd &  a)

Definition at line 335 of file AVX/Complex.h.

                                                                      {
  __m256d result = _mm256_permute2f128_pd(a.v, a.v, 1);
  return Packet2cd(result);
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::preverse ( const Packet &  a) [inline]
Returns:
the reversed elements of a

Definition at line 342 of file GenericPacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::preverse ( const Packet1cd &  a)

Definition at line 352 of file SSE/Complex.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::preverse ( const Packet2d &  a)

Definition at line 432 of file SSE/PacketMath.h.

{ return _mm_shuffle_pd(a,a,0x1); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::preverse ( const Packet4f &  a)

Definition at line 505 of file AltiVec/PacketMath.h.

{ return (Packet4f)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE32); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::preverse ( const Packet4i &  a)

Definition at line 506 of file AltiVec/PacketMath.h.

{ return (Packet4i)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE32); }
template<typename Derived >
std::ostream & Eigen::internal::print_matrix ( std::ostream &  s,
const Derived &  _m,
const IOFormat &  fmt 
)

print the matrix _m to the output stream s using the output format fmt

Definition at line 157 of file IO.h.

{
  if(_m.size() == 0)
  {
    s << fmt.matPrefix << fmt.matSuffix;
    return s;
  }
  
  typename Derived::Nested m = _m;
  typedef typename Derived::Scalar Scalar;

  Index width = 0;

  std::streamsize explicit_precision;
  if(fmt.precision == StreamPrecision)
  {
    explicit_precision = 0;
  }
  else if(fmt.precision == FullPrecision)
  {
    if (NumTraits<Scalar>::IsInteger)
    {
      explicit_precision = 0;
    }
    else
    {
      explicit_precision = significant_decimals_impl<Scalar>::run();
    }
  }
  else
  {
    explicit_precision = fmt.precision;
  }

  std::streamsize old_precision = 0;
  if(explicit_precision) old_precision = s.precision(explicit_precision);

  bool align_cols = !(fmt.flags & DontAlignCols);
  if(align_cols)
  {
    // compute the largest width
    for(Index j = 0; j < m.cols(); ++j)
      for(Index i = 0; i < m.rows(); ++i)
      {
        std::stringstream sstr;
        sstr.copyfmt(s);
        sstr << m.coeff(i,j);
        width = std::max<Index>(width, Index(sstr.str().length()));
      }
  }
  s << fmt.matPrefix;
  for(Index i = 0; i < m.rows(); ++i)
  {
    if (i)
      s << fmt.rowSpacer;
    s << fmt.rowPrefix;
    if(width) s.width(width);
    s << m.coeff(i, 0);
    for(Index j = 1; j < m.cols(); ++j)
    {
      s << fmt.coeffSeparator;
      if (width) s.width(width);
      s << m.coeff(i, j);
    }
    s << fmt.rowSuffix;
    if( i < m.rows() - 1)
      s << fmt.rowSeparator;
  }
  s << fmt.matSuffix;
  if(explicit_precision) s.precision(old_precision);
  return s;
}
template<size_t offset, typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::protate ( const Packet &  a) [inline]
Returns:
a packet with the coefficients rotated to the right in little-endian convention, by the given offset, e.g. for offset == 1: (packet[3], packet[2], packet[1], packet[0]) becomes (packet[0], packet[3], packet[2], packet[1])

Definition at line 356 of file GenericPacketMath.h.

{
  return offset ? protate_impl<offset, Packet>::run(a) : a;
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pround ( const Packet &  a)
Returns:
the rounded value of a (coeff-wise)

Definition at line 434 of file GenericPacketMath.h.

{ using numext::round; return round(a); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pround< Packet4d > ( const Packet4d &  a)

Definition at line 187 of file AVX/PacketMath.h.

{ return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pround< Packet8f > ( const Packet8f &  a)

Definition at line 186 of file AVX/PacketMath.h.

{ return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::prsqrt ( const Packet &  a)
Returns:
the reciprocal square-root of a (coeff-wise)

Definition at line 428 of file GenericPacketMath.h.

                               {
  return pdiv(pset1<Packet>(1), psqrt(a));
}

Definition at line 514 of file arch/SSE/MathFunctions.h.

                                             {
  // Unfortunately we can't use the much faster _mm_rsqrt_pd since it only provides an approximation.
  return _mm_div_pd(pset1<Packet2d>(1.0), _mm_sqrt_pd(x));
}

Definition at line 474 of file arch/AVX/MathFunctions.h.

                                             {
  _EIGEN_DECLARE_CONST_Packet4d(one, 1.0);
  return _mm256_div_pd(p4d_one, _mm256_sqrt_pd(x));
}

Definition at line 506 of file arch/SSE/MathFunctions.h.

                                             {
  // Unfortunately we can't use the much faster _mm_rsqrt_ps since it only provides an approximation.
  return _mm_div_ps(pset1<Packet4f>(1.0f), _mm_sqrt_ps(x));
}

Definition at line 467 of file arch/AVX/MathFunctions.h.

                                             {
  _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
  return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(x));
}
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter ( Scalar *  to,
const Packet &  from,
Index   
) [inline]

Definition at line 289 of file GenericPacketMath.h.

 { pstore(to, from); }
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< double, Packet2d > ( double *  to,
const Packet2d &  from,
Index  stride 
) [inline]

Definition at line 381 of file SSE/PacketMath.h.

{
  to[stride*0] = _mm_cvtsd_f64(from);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
}
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< double, Packet4d > ( double *  to,
const Packet4d &  from,
Index  stride 
) [inline]

Definition at line 278 of file AVX/PacketMath.h.

{
  __m128d low = _mm256_extractf128_pd(from, 0);
  to[stride*0] = _mm_cvtsd_f64(low);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
  __m128d high = _mm256_extractf128_pd(from, 1);
  to[stride*2] = _mm_cvtsd_f64(high);
  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
}
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< float, Packet4f > ( float *  to,
const Packet4f &  from,
Index  stride 
) [inline]

Definition at line 278 of file AltiVec/PacketMath.h.

{
  float EIGEN_ALIGN16 af[4];
  pstore<float>(af, from);
  to[0*stride] = af[0];
  to[1*stride] = af[1];
  to[2*stride] = af[2];
  to[3*stride] = af[3];
}
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< float, Packet8f > ( float *  to,
const Packet8f &  from,
Index  stride 
) [inline]

Definition at line 264 of file AVX/PacketMath.h.

{
  __m128 low = _mm256_extractf128_ps(from, 0);
  to[stride*0] = _mm_cvtss_f32(low);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));

  __m128 high = _mm256_extractf128_ps(from, 1);
  to[stride*4] = _mm_cvtss_f32(high);
  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< int, Packet4i > ( int *  to,
const Packet4i &  from,
Index  stride 
) [inline]

Definition at line 287 of file AltiVec/PacketMath.h.

{
  int EIGEN_ALIGN16 ai[4];
  pstore<int>((int *)ai, from);
  to[0*stride] = ai[0];
  to[1*stride] = ai[1];
  to[2*stride] = ai[2];
  to[3*stride] = ai[3];
}
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< double >, Packet2cd > ( std::complex< double > *  to,
const Packet2cd &  from,
Index  stride 
) [inline]

Definition at line 319 of file AVX/Complex.h.

{
  __m128d low = _mm256_extractf128_pd(from.v, 0);
  to[stride*0] = std::complex<double>(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1)));
  __m128d high = _mm256_extractf128_pd(from.v, 1);
  to[stride*1] = std::complex<double>(_mm_cvtsd_f64(high), _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1)));
}
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< float >, Packet2cf > ( std::complex< float > *  to,
const Packet2cf &  from,
Index  stride 
) [inline]

Definition at line 77 of file AltiVec/Complex.h.

{
  std::complex<float> EIGEN_ALIGN16 af[2];
  vec_st(from.v, 0, (float*)af);
  to[0*stride] = af[0];
  to[1*stride] = af[1];
}
template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< float >, Packet4cf > ( std::complex< float > *  to,
const Packet4cf &  from,
Index  stride 
) [inline]

Definition at line 103 of file AVX/Complex.h.

{
  __m128 low = _mm256_extractf128_ps(from.v, 0);
  to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)),
                                     _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1)));
  to[stride*1] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 2)),
                                     _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3)));

  __m128 high = _mm256_extractf128_ps(from.v, 1);
  to[stride*2] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 0)),
                                     _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1)));
  to[stride*3] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)),
                                     _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3)));

}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pset1 ( const typename unpacket_traits< Packet >::type a) [inline]
Returns:
a packet with constant coefficients a, e.g.: (a,a,a,a)

Definition at line 216 of file GenericPacketMath.h.

{ return a; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pset1< Packet1cd > ( const std::complex< double > &  from)

Definition at line 334 of file SSE/Complex.h.

{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pset1< Packet2cd > ( const std::complex< double > &  from)

Definition at line 301 of file AVX/Complex.h.

{
  // in case casting to a __m128d* is really not safe, then we can still fallback to this version: (much slower though)
//   return Packet2cd(_mm256_loadu2_m128d((const double*)&from,(const double*)&from));
    return Packet2cd(_mm256_broadcast_pd((const __m128d*)(const void*)&from));
}
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pset1< Packet2cf > ( const std::complex< float > &  from)

Definition at line 58 of file AltiVec/Complex.h.

{
  Packet2cf res;
  /* On AltiVec we cannot load 64-bit registers, so we have to take care of alignment */
  if((ptrdiff_t(&from) % 16) == 0)
    res.v = pload<Packet4f>((const float *)&from);
  else
    res.v = ploadu<Packet4f>((const float *)&from);
  res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI);
  return res;
}
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pset1< Packet2d > ( const double &  from)

Definition at line 174 of file SSE/PacketMath.h.

{ return _mm_set1_pd(from); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pset1< Packet4cf > ( const std::complex< float > &  from)

Definition at line 79 of file AVX/Complex.h.

{
  return Packet4cf(_mm256_castpd_ps(_mm256_broadcast_sd((const double*)(const void*)&from)));
}
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pset1< Packet4d > ( const double &  from)

Definition at line 115 of file AVX/PacketMath.h.

{ return _mm256_set1_pd(from); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pset1< Packet4f > ( const float &  from)

Definition at line 223 of file AltiVec/PacketMath.h.

                                                                            {
  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
  float EIGEN_ALIGN16 af[4];
  af[0] = from;
  Packet4f vc = pload<Packet4f>(af);
  vc = vec_splat(vc, 0);
  return vc;
}
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pset1< Packet4i > ( const int &  from)

Definition at line 232 of file AltiVec/PacketMath.h.

                                                                              {
  int EIGEN_ALIGN16 ai[4];
  ai[0] = from;
  Packet4i vc = pload<Packet4i>(ai);
  vc = vec_splat(vc, 0);
  return vc;
}
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pset1< Packet8f > ( const float &  from)

Definition at line 114 of file AVX/PacketMath.h.

{ return _mm256_set1_ps(from); }
template<>
EIGEN_STRONG_INLINE Packet8i Eigen::internal::pset1< Packet8i > ( const int &  from)

Definition at line 116 of file AVX/PacketMath.h.

{ return _mm256_set1_epi32(from); }
Packet8i Eigen::internal::pshiftleft ( Packet8i  v,
int  n 
) [inline]

Definition at line 21 of file arch/AVX/MathFunctions.h.

{
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_slli_epi32(v, n);
#else
  __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n);
  __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n);
  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
#endif
}
Packet8f Eigen::internal::pshiftright ( Packet8f  v,
int  n 
) [inline]

Definition at line 32 of file arch/AVX/MathFunctions.h.

{
#ifdef EIGEN_VECTORIZE_AVX2
  return _mm256_cvtepi32_ps(_mm256_srli_epi32(_mm256_castps_si256(v), n));
#else
  __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 0), n);
  __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 1), n);
  return _mm256_cvtepi32_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1));
#endif
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::psin ( const Packet &  a)
Returns:
the sine of a (coeff-wise)

Definition at line 376 of file GenericPacketMath.h.

{ using std::sin; return sin(a); }

Definition at line 258 of file arch/SSE/MathFunctions.h.

{
  Packet4f x = _x;
  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);
  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);

  _EIGEN_DECLARE_CONST_Packet4i(1, 1);
  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);
  _EIGEN_DECLARE_CONST_Packet4i(2, 2);
  _EIGEN_DECLARE_CONST_Packet4i(4, 4);

  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);

  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);
  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);
  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);
  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI

  Packet4f xmm1, xmm2, xmm3, sign_bit, y;

  Packet4i emm0, emm2;
  sign_bit = x;
  /* take the absolute value */
  x = pabs(x);

  /* take the modulo */

  /* extract the sign bit (upper one) */
  sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);

  /* scale by 4/Pi */
  y = pmul(x, p4f_cephes_FOPI);

  /* store the integer part of y in mm0 */
  emm2 = _mm_cvttps_epi32(y);
  /* j=(j+1) & (~1) (see the cephes sources) */
  emm2 = _mm_add_epi32(emm2, p4i_1);
  emm2 = _mm_and_si128(emm2, p4i_not1);
  y = _mm_cvtepi32_ps(emm2);
  /* get the swap sign flag */
  emm0 = _mm_and_si128(emm2, p4i_4);
  emm0 = _mm_slli_epi32(emm0, 29);
  /* get the polynom selection mask
     there is one polynom for 0 <= x <= Pi/4
     and another one for Pi/4<x<=Pi/2

     Both branches will be computed.
  */
  emm2 = _mm_and_si128(emm2, p4i_2);
  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

  Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
  Packet4f poly_mask = _mm_castsi128_ps(emm2);
  sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);

  /* The magic pass: "Extended precision modular arithmetic"
     x = ((x - y * DP1) - y * DP2) - y * DP3; */
  xmm1 = pmul(y, p4f_minus_cephes_DP1);
  xmm2 = pmul(y, p4f_minus_cephes_DP2);
  xmm3 = pmul(y, p4f_minus_cephes_DP3);
  x = padd(x, xmm1);
  x = padd(x, xmm2);
  x = padd(x, xmm3);

  /* Evaluate the first polynom  (0 <= x <= Pi/4) */
  y = p4f_coscof_p0;
  Packet4f z = _mm_mul_ps(x,x);

  y = pmadd(y, z, p4f_coscof_p1);
  y = pmadd(y, z, p4f_coscof_p2);
  y = pmul(y, z);
  y = pmul(y, z);
  Packet4f tmp = pmul(z, p4f_half);
  y = psub(y, tmp);
  y = padd(y, p4f_1);

  /* Evaluate the second polynom  (Pi/4 <= x <= Pi/2) */

  Packet4f y2 = p4f_sincof_p0;
  y2 = pmadd(y2, z, p4f_sincof_p1);
  y2 = pmadd(y2, z, p4f_sincof_p2);
  y2 = pmul(y2, z);
  y2 = pmul(y2, x);
  y2 = padd(y2, x);

  /* select the correct result from the two polynoms */
  y2 = _mm_and_ps(poly_mask, y2);
  y = _mm_andnot_ps(poly_mask, y);
  y = _mm_or_ps(y,y2);
  /* update the sign */
  return _mm_xor_ps(y, sign_bit);
}

Definition at line 49 of file arch/AVX/MathFunctions.h.

                                   {
  Packet8f x = _x;

  // Some useful values.
  _EIGEN_DECLARE_CONST_Packet8i(one, 1);
  _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);
  _EIGEN_DECLARE_CONST_Packet8f(two, 2.0f);
  _EIGEN_DECLARE_CONST_Packet8f(one_over_four, 0.25f);
  _EIGEN_DECLARE_CONST_Packet8f(one_over_pi, 3.183098861837907e-01f);
  _EIGEN_DECLARE_CONST_Packet8f(neg_pi_first, -3.140625000000000e+00f);
  _EIGEN_DECLARE_CONST_Packet8f(neg_pi_second, -9.670257568359375e-04f);
  _EIGEN_DECLARE_CONST_Packet8f(neg_pi_third, -6.278329571784980e-07f);
  _EIGEN_DECLARE_CONST_Packet8f(four_over_pi, 1.273239544735163e+00f);

  // Map x from [-Pi/4,3*Pi/4] to z in [-1,3] and subtract the shifted period.
  Packet8f z = pmul(x, p8f_one_over_pi);
  Packet8f shift = _mm256_floor_ps(padd(z, p8f_one_over_four));
  x = pmadd(shift, p8f_neg_pi_first, x);
  x = pmadd(shift, p8f_neg_pi_second, x);
  x = pmadd(shift, p8f_neg_pi_third, x);
  z = pmul(x, p8f_four_over_pi);

  // Make a mask for the entries that need flipping, i.e. wherever the shift
  // is odd.
  Packet8i shift_ints = _mm256_cvtps_epi32(shift);
  Packet8i shift_isodd = _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(shift_ints), _mm256_castsi256_ps(p8i_one)));
  Packet8i sign_flip_mask = pshiftleft(shift_isodd, 31);

  // Create a mask for which interpolant to use, i.e. if z > 1, then the mask
  // is set to ones for that entry.
  Packet8f ival_mask = _mm256_cmp_ps(z, p8f_one, _CMP_GT_OQ);

  // Evaluate the polynomial for the interval [1,3] in z.
  _EIGEN_DECLARE_CONST_Packet8f(coeff_right_0, 9.999999724233232e-01f);
  _EIGEN_DECLARE_CONST_Packet8f(coeff_right_2, -3.084242535619928e-01f);
  _EIGEN_DECLARE_CONST_Packet8f(coeff_right_4, 1.584991525700324e-02f);
  _EIGEN_DECLARE_CONST_Packet8f(coeff_right_6, -3.188805084631342e-04f);
  Packet8f z_minus_two = psub(z, p8f_two);
  Packet8f z_minus_two2 = pmul(z_minus_two, z_minus_two);
  Packet8f right = pmadd(p8f_coeff_right_6, z_minus_two2, p8f_coeff_right_4);
  right = pmadd(right, z_minus_two2, p8f_coeff_right_2);
  right = pmadd(right, z_minus_two2, p8f_coeff_right_0);

  // Evaluate the polynomial for the interval [-1,1] in z.
  _EIGEN_DECLARE_CONST_Packet8f(coeff_left_1, 7.853981525427295e-01f);
  _EIGEN_DECLARE_CONST_Packet8f(coeff_left_3, -8.074536727092352e-02f);
  _EIGEN_DECLARE_CONST_Packet8f(coeff_left_5, 2.489871967827018e-03f);
  _EIGEN_DECLARE_CONST_Packet8f(coeff_left_7, -3.587725841214251e-05f);
  Packet8f z2 = pmul(z, z);
  Packet8f left = pmadd(p8f_coeff_left_7, z2, p8f_coeff_left_5);
  left = pmadd(left, z2, p8f_coeff_left_3);
  left = pmadd(left, z2, p8f_coeff_left_1);
  left = pmul(left, z);

  // Assemble the results, i.e. select the left and right polynomials.
  left = _mm256_andnot_ps(ival_mask, left);
  right = _mm256_and_ps(ival_mask, right);
  Packet8f res = _mm256_or_ps(left, right);

  // Flip the sign on the odd intervals and return the result.
  res = _mm256_xor_ps(res, _mm256_castsi256_ps(sign_flip_mask));
  return res;
}
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::psinh ( const Packet &  a)
Returns:
the hyperbolic sine of a (coeff-wise)

Definition at line 400 of file GenericPacketMath.h.

{ using std::sinh; return sinh(a); }
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::psqrt ( const Packet &  a)
Returns:
the square-root of a (coeff-wise)

Definition at line 424 of file GenericPacketMath.h.

{ using std::sqrt; return sqrt(a); }

Definition at line 471 of file arch/SSE/MathFunctions.h.

{ return _mm_sqrt_pd(x); }

Definition at line 432 of file arch/AVX/MathFunctions.h.

                                            {
  return _mm256_sqrt_pd(x);
}

Definition at line 466 of file arch/SSE/MathFunctions.h.

{ return _mm_sqrt_ps(x); }

Definition at line 427 of file arch/AVX/MathFunctions.h.

                                            {
  return _mm256_sqrt_ps(x);
}
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::pstore ( Scalar *  to,
const Packet &  from 
) [inline]

copy the packet from to *to, to must be 16 bytes aligned

Definition at line 279 of file GenericPacketMath.h.

{ (*to) = from; }
template<typename Packet >
void Eigen::internal::pstore1 ( typename unpacket_traits< Packet >::type to,
const typename unpacket_traits< Packet >::type a 
) [inline]

copy a packet with constant coefficient a (e.g., [a,a,a,a]) to *to. to must be 16 bytes aligned

Definition at line 467 of file GenericPacketMath.h.

{
  pstore(to, pset1<Packet>(a));
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet2d > ( double *  to,
const double &  a 
)

Definition at line 401 of file SSE/PacketMath.h.

{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet4d > ( double *  to,
const double &  a 
)

Definition at line 293 of file AVX/PacketMath.h.

{
  Packet4d pa = pset1<Packet4d>(a);
  pstore(to, pa);
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet4f > ( float *  to,
const float &  a 
)

Definition at line 395 of file SSE/PacketMath.h.

{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet8f > ( float *  to,
const float &  a 
)

Definition at line 288 of file AVX/PacketMath.h.

{
  Packet8f pa = pset1<Packet8f>(a);
  pstore(to, pa);
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet8i > ( int *  to,
const int &  a 
)

Definition at line 298 of file AVX/PacketMath.h.

{
  Packet8i pa = pset1<Packet8i>(a);
  pstore(to, pa);
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< double > ( double *  to,
const Packet4d &  from 
)

Definition at line 245 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< double > ( double *  to,
const Packet2d &  from 
)

Definition at line 354 of file SSE/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< float > ( float *  to,
const Packet4f &  from 
)

Definition at line 220 of file AltiVec/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< float > ( float *  to,
const Packet8f &  from 
)

Definition at line 244 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< int > ( int *  to,
const Packet4i &  from 
)

Definition at line 221 of file AltiVec/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< int > ( int *  to,
const Packet8i &  from 
)

Definition at line 246 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< double > > ( std::complex< double > *  to,
const Packet2cd &  from 
)

Definition at line 310 of file AVX/Complex.h.

{ EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< double > > ( std::complex< double > *  to,
const Packet1cd &  from 
)

Definition at line 340 of file SSE/Complex.h.

{ EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, Packet2d(from.v)); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< float > > ( std::complex< float > *  to,
const Packet4cf &  from 
)

Definition at line 92 of file AVX/Complex.h.

{ EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf &  from 
)

Definition at line 123 of file AltiVec/Complex.h.

{ EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }
template<typename Scalar , typename Packet , int Alignment>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void Eigen::internal::pstoret ( Scalar *  to,
const Packet &  from 
)

copy the packet from to *to. The pointer to must be aligned on an Alignment bytes boundary.

Definition at line 493 of file GenericPacketMath.h.

{
  if(Alignment >= unpacket_traits<Packet>::alignment)
    pstore(to, from);
  else
    pstoreu(to, from);
}
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::pstoreu ( Scalar *  to,
const Packet &  from 
) [inline]

copy the packet from to *to, (un-aligned store)

Definition at line 283 of file GenericPacketMath.h.

{  (*to) = from; }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< double > ( double *  to,
const Packet4d &  from 
)

Definition at line 249 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< double > ( double *  to,
const Packet2d &  from 
)

Definition at line 357 of file SSE/PacketMath.h.

{ EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< float > ( float *  to,
const Packet8f &  from 
)

Definition at line 248 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< float > ( float *  to,
const Packet4f &  from 
)

Definition at line 490 of file AltiVec/PacketMath.h.

{
  EIGEN_DEBUG_ALIGNED_STORE
  vec_vsx_st(from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to));
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< int > ( int *  to,
const Packet8i &  from 
)

Definition at line 250 of file AVX/PacketMath.h.

{ EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< int > ( int *  to,
const Packet4i &  from 
)

Definition at line 485 of file AltiVec/PacketMath.h.

{
  EIGEN_DEBUG_ALIGNED_STORE
  vec_vsx_st(from, (long)to & 15, (int*) _EIGEN_ALIGNED_PTR(to));
}
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< double > > ( std::complex< double > *  to,
const Packet2cd &  from 
)

Definition at line 311 of file AVX/Complex.h.

{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< double > > ( std::complex< double > *  to,
const Packet1cd &  from 
)

Definition at line 341 of file SSE/Complex.h.

{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, Packet2d(from.v)); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< float > > ( std::complex< float > *  to,
const Packet4cf &  from 
)

Definition at line 93 of file AVX/Complex.h.

{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }
template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf &  from 
)

Definition at line 124 of file AltiVec/Complex.h.

{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::psub ( const Packet &  a,
const Packet &  b 
) [inline]
Returns:
a - b (coeff-wise)

Definition at line 150 of file GenericPacketMath.h.

                         { return a-b; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::psub< Packet1cd > ( const Packet1cd &  a,
const Packet1cd &  b 
)

Definition at line 302 of file SSE/Complex.h.

{ return Packet1cd(_mm_sub_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::psub< Packet2cd > ( const Packet2cd &  a,
const Packet2cd &  b 
)

Definition at line 273 of file AVX/Complex.h.

{ return Packet2cd(_mm256_sub_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::psub< Packet2cf > ( const Packet2cf &  a,
const Packet2cf &  b 
)

Definition at line 87 of file AltiVec/Complex.h.

{ return Packet2cf(vec_sub(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::psub< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 198 of file SSE/PacketMath.h.

{ return _mm_sub_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::psub< Packet4cf > ( const Packet4cf &  a,
const Packet4cf &  b 
)

Definition at line 51 of file AVX/Complex.h.

{ return Packet4cf(_mm256_sub_ps(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::psub< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 128 of file AVX/PacketMath.h.

{ return _mm256_sub_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::psub< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 303 of file AltiVec/PacketMath.h.

{ return vec_sub(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::psub< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 304 of file AltiVec/PacketMath.h.

{ return vec_sub(a,b); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::psub< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 127 of file AVX/PacketMath.h.

{ return _mm256_sub_ps(a,b); }
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::ptan ( const Packet &  a)
Returns:
the tan of a (coeff-wise)

Definition at line 384 of file GenericPacketMath.h.

{ using std::tan; return tan(a); }
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::ptanh ( const Packet &  a)
Returns:
the hyperbolic tan of a (coeff-wise)

Definition at line 408 of file GenericPacketMath.h.

{ using std::tanh; return tanh(a); }

Definition at line 525 of file arch/SSE/MathFunctions.h.

                                    {
  // Clamp the inputs to the range [-9, 9] since anything outside
  // this range is +/-1.0f in single-precision.
  _EIGEN_DECLARE_CONST_Packet4f(plus_9, 9.0f);
  _EIGEN_DECLARE_CONST_Packet4f(minus_9, -9.0f);
  const Packet4f x = pmax(p4f_minus_9, pmin(p4f_plus_9, _x));

  // The monomial coefficients of the numerator polynomial (odd).
  _EIGEN_DECLARE_CONST_Packet4f(alpha_1, 4.89352455891786e-03f);
  _EIGEN_DECLARE_CONST_Packet4f(alpha_3, 6.37261928875436e-04f);
  _EIGEN_DECLARE_CONST_Packet4f(alpha_5, 1.48572235717979e-05f);
  _EIGEN_DECLARE_CONST_Packet4f(alpha_7, 5.12229709037114e-08f);
  _EIGEN_DECLARE_CONST_Packet4f(alpha_9, -8.60467152213735e-11f);
  _EIGEN_DECLARE_CONST_Packet4f(alpha_11, 2.00018790482477e-13f);
  _EIGEN_DECLARE_CONST_Packet4f(alpha_13, -2.76076847742355e-16f);

  // The monomial coefficients of the denominator polynomial (even).
  _EIGEN_DECLARE_CONST_Packet4f(beta_0, 4.89352518554385e-03f);
  _EIGEN_DECLARE_CONST_Packet4f(beta_2, 2.26843463243900e-03f);
  _EIGEN_DECLARE_CONST_Packet4f(beta_4, 1.18534705686654e-04f);
  _EIGEN_DECLARE_CONST_Packet4f(beta_6, 1.19825839466702e-06f);

  // Since the polynomials are odd/even, we need x^2.
  const Packet4f x2 = pmul(x, x);

  // Evaluate the numerator polynomial p.
  Packet4f p = pmadd(x2, p4f_alpha_13, p4f_alpha_11);
  p = pmadd(x2, p, p4f_alpha_9);
  p = pmadd(x2, p, p4f_alpha_7);
  p = pmadd(x2, p, p4f_alpha_5);
  p = pmadd(x2, p, p4f_alpha_3);
  p = pmadd(x2, p, p4f_alpha_1);
  p = pmul(x, p);

  // Evaluate the denominator polynomial p.
  Packet4f q = pmadd(x2, p4f_beta_6, p4f_beta_4);
  q = pmadd(x2, q, p4f_beta_2);
  q = pmadd(x2, q, p4f_beta_0);

  // Divide the numerator by the denominator.
  return pdiv(p, q);
}

Definition at line 274 of file arch/AVX/MathFunctions.h.

                                    {
  // Clamp the inputs to the range [-9, 9] since anything outside
  // this range is +/-1.0f in single-precision.
  _EIGEN_DECLARE_CONST_Packet8f(plus_9, 9.0f);
  _EIGEN_DECLARE_CONST_Packet8f(minus_9, -9.0f);
  const Packet8f x = pmax(p8f_minus_9, pmin(p8f_plus_9, _x));

  // The monomial coefficients of the numerator polynomial (odd).
  _EIGEN_DECLARE_CONST_Packet8f(alpha_1, 4.89352455891786e-03f);
  _EIGEN_DECLARE_CONST_Packet8f(alpha_3, 6.37261928875436e-04f);
  _EIGEN_DECLARE_CONST_Packet8f(alpha_5, 1.48572235717979e-05f);
  _EIGEN_DECLARE_CONST_Packet8f(alpha_7, 5.12229709037114e-08f);
  _EIGEN_DECLARE_CONST_Packet8f(alpha_9, -8.60467152213735e-11f);
  _EIGEN_DECLARE_CONST_Packet8f(alpha_11, 2.00018790482477e-13f);
  _EIGEN_DECLARE_CONST_Packet8f(alpha_13, -2.76076847742355e-16f);

  // The monomial coefficients of the denominator polynomial (even).
  _EIGEN_DECLARE_CONST_Packet8f(beta_0, 4.89352518554385e-03f);
  _EIGEN_DECLARE_CONST_Packet8f(beta_2, 2.26843463243900e-03f);
  _EIGEN_DECLARE_CONST_Packet8f(beta_4, 1.18534705686654e-04f);
  _EIGEN_DECLARE_CONST_Packet8f(beta_6, 1.19825839466702e-06f);

  // Since the polynomials are odd/even, we need x^2.
  const Packet8f x2 = pmul(x, x);

  // Evaluate the numerator polynomial p.
  Packet8f p = pmadd(x2, p8f_alpha_13, p8f_alpha_11);
  p = pmadd(x2, p, p8f_alpha_9);
  p = pmadd(x2, p, p8f_alpha_7);
  p = pmadd(x2, p, p8f_alpha_5);
  p = pmadd(x2, p, p8f_alpha_3);
  p = pmadd(x2, p, p8f_alpha_1);
  p = pmul(x, p);

  // Evaluate the denominator polynomial p.
  Packet8f q = pmadd(x2, p8f_beta_6, p8f_beta_4);
  q = pmadd(x2, q, p8f_beta_2);
  q = pmadd(x2, q, p8f_beta_0);

  // Divide the numerator by the denominator.
  return pdiv(p, q);
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet2cf, 2 > &  kernel) [inline]

Definition at line 239 of file AltiVec/Complex.h.

{
  Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
  kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
  kernel.packet[0].v = tmp;
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet4cf, 4 > &  kernel) [inline]

Definition at line 435 of file AVX/Complex.h.

                                             {
  __m256d P0 = _mm256_castps_pd(kernel.packet[0].v);
  __m256d P1 = _mm256_castps_pd(kernel.packet[1].v);
  __m256d P2 = _mm256_castps_pd(kernel.packet[2].v);
  __m256d P3 = _mm256_castps_pd(kernel.packet[3].v);

  __m256d T0 = _mm256_shuffle_pd(P0, P1, 15);
  __m256d T1 = _mm256_shuffle_pd(P0, P1, 0);
  __m256d T2 = _mm256_shuffle_pd(P2, P3, 15);
  __m256d T3 = _mm256_shuffle_pd(P2, P3, 0);

  kernel.packet[1].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 32));
  kernel.packet[3].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 49));
  kernel.packet[0].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 32));
  kernel.packet[2].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 49));
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet2cd, 2 > &  kernel) [inline]

Definition at line 453 of file AVX/Complex.h.

                                             {
  __m256d tmp = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 0+(2<<4));
  kernel.packet[1].v = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 1+(3<<4));
 kernel.packet[0].v = tmp;
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet8f, 8 > &  kernel) [inline]

Definition at line 533 of file AVX/PacketMath.h.

                                            {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet8f, 4 > &  kernel) [inline]

Definition at line 561 of file AVX/PacketMath.h.

                                            {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));

  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
}
template<typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet, 1 > &  ) [inline]

Definition at line 566 of file GenericPacketMath.h.

                                     {
  // Nothing to do in the scalar case, i.e. a 1x1 matrix.
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet4d, 4 > &  kernel) [inline]

Definition at line 579 of file AVX/PacketMath.h.

                                            {
  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet4f, 4 > &  kernel) [inline]

Definition at line 694 of file AltiVec/PacketMath.h.

                                            {
  Packet4f t0, t1, t2, t3;
  t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
  t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
  t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
  t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
  kernel.packet[0] = vec_mergeh(t0, t2);
  kernel.packet[1] = vec_mergel(t0, t2);
  kernel.packet[2] = vec_mergeh(t1, t3);
  kernel.packet[3] = vec_mergel(t1, t3);
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet4i, 4 > &  kernel) [inline]

Definition at line 707 of file AltiVec/PacketMath.h.

                                            {
  Packet4i t0, t1, t2, t3;
  t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
  t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
  t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
  t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
  kernel.packet[0] = vec_mergeh(t0, t2);
  kernel.packet[1] = vec_mergel(t0, t2);
  kernel.packet[2] = vec_mergeh(t1, t3);
  kernel.packet[3] = vec_mergel(t1, t3);
}
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet2d, 2 > &  kernel) [inline]

Definition at line 790 of file SSE/PacketMath.h.

                                            {
  __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[1] = tmp;
}

Definition at line 513 of file SSE/PacketMath.h.

{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}
template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pxor ( const Packet &  a,
const Packet &  b 
) [inline]
Returns:
the bitwise xor of a and b

Definition at line 200 of file GenericPacketMath.h.

{ return a ^ b; }
template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pxor< Packet1cd > ( const Packet1cd &  a,
const Packet1cd &  b 
)

Definition at line 326 of file SSE/Complex.h.

{ return Packet1cd(_mm_xor_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pxor< Packet2cd > ( const Packet2cd &  a,
const Packet2cd &  b 
)

Definition at line 293 of file AVX/Complex.h.

{ return Packet2cd(_mm256_xor_pd(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pxor< Packet2cf > ( const Packet2cf &  a,
const Packet2cf &  b 
)

Definition at line 112 of file AltiVec/Complex.h.

{ return Packet2cf(vec_xor(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pxor< Packet2d > ( const Packet2d &  a,
const Packet2d &  b 
)

Definition at line 294 of file SSE/PacketMath.h.

{ return _mm_xor_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pxor< Packet4cf > ( const Packet4cf &  a,
const Packet4cf &  b 
)

Definition at line 72 of file AVX/Complex.h.

{ return Packet4cf(_mm256_xor_ps(a.v,b.v)); }
template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pxor< Packet4d > ( const Packet4d &  a,
const Packet4d &  b 
)

Definition at line 202 of file AVX/PacketMath.h.

{ return _mm256_xor_pd(a,b); }
template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pxor< Packet4f > ( const Packet4f &  a,
const Packet4f &  b 
)

Definition at line 388 of file AltiVec/PacketMath.h.

{ return vec_xor(a, b); }
template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pxor< Packet4i > ( const Packet4i &  a,
const Packet4i &  b 
)

Definition at line 389 of file AltiVec/PacketMath.h.

{ return vec_xor(a, b); }
template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pxor< Packet8f > ( const Packet8f &  a,
const Packet8f &  b 
)

Definition at line 201 of file AVX/PacketMath.h.

{ return _mm256_xor_ps(a,b); }
void Eigen::internal::queryCacheSizes ( int &  l1,
int &  l2,
int &  l3 
) [inline]

Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively

Definition at line 917 of file Memory.h.

{
  #ifdef EIGEN_CPUID
  int abcd[4];
  const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
  const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
  const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"

  // identify the CPU vendor
  EIGEN_CPUID(abcd,0x0,0);
  int max_std_funcs = abcd[1];
  if(cpuid_is_vendor(abcd,GenuineIntel))
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
  else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
    queryCacheSizes_amd(l1,l2,l3);
  else
    // by default let's use Intel's API
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);

  // here is the list of other vendors:
//   ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
//   ||cpuid_is_vendor(abcd,"CyrixInstead")
//   ||cpuid_is_vendor(abcd,"CentaurHauls")
//   ||cpuid_is_vendor(abcd,"GenuineTMx86")
//   ||cpuid_is_vendor(abcd,"TransmetaCPU")
//   ||cpuid_is_vendor(abcd,"RiseRiseRise")
//   ||cpuid_is_vendor(abcd,"Geode by NSC")
//   ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
//   ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
//   ||cpuid_is_vendor(abcd,"NexGenDriven")
  #else
  l1 = l2 = l3 = -1;
  #endif
}
Returns:
the size in Bytes of the L1 data cache

Definition at line 954 of file Memory.h.

{
  int l1(-1), l2, l3;
  queryCacheSizes(l1,l2,l3);
  return l1;
}
Returns:
the size in Bytes of the L2 or L3 cache if the latter is present

Definition at line 963 of file Memory.h.

{
  int l1, l2(-1), l3(-1);
  queryCacheSizes(l1,l2,l3);
  return (std::max)(l2,l3);
}
template<typename VectorV , typename VectorI >
Index Eigen::internal::QuickSplit ( VectorV &  row,
VectorI &  ind,
Index  ncut 
)

Compute a quick-sort split of a vector. On output, the vector row is permuted such that its elements satisfy: abs(row(i)) >= abs(row(ncut)) if i < ncut, and abs(row(i)) <= abs(row(ncut)) if i > ncut.

Parameters:
row — The vector of values
ind — The array of indices for the elements in row
ncut — The number of largest elements to keep

Definition at line 29 of file IncompleteLUT.h.

{
  typedef typename VectorV::RealScalar RealScalar;
  using std::swap;
  using std::abs;
  Index mid;
  Index n = row.size(); /* length of the vector */
  Index first, last ;
  
  ncut--; /* to fit the zero-based indices */
  first = 0; 
  last = n-1; 
  if (ncut < first || ncut > last ) return 0;
  
  do {
    mid = first; 
    RealScalar abskey = abs(row(mid)); 
    for (Index j = first + 1; j <= last; j++) {
      if ( abs(row(j)) > abskey) {
        ++mid;
        swap(row(mid), row(j));
        swap(ind(mid), ind(j));
      }
    }
    /* Interchange for the pivot element */
    swap(row(mid), row(first));
    swap(ind(mid), ind(first));
    
    if (mid > ncut) last = mid - 1;
    else if (mid < ncut ) first = mid + 1; 
  } while (mid != ncut );
  
  return 0; /* mid is equal to ncut */ 
}
template<typename MatrixType , typename RealScalar , typename Index >
void Eigen::internal::real_2x2_jacobi_svd ( const MatrixType &  matrix,
Index  p,
Index  q,
JacobiRotation< RealScalar > *  j_left,
JacobiRotation< RealScalar > *  j_right 
)

Definition at line 405 of file JacobiSVD.h.

{
  using std::sqrt;
  using std::abs;
  Matrix<RealScalar,2,2> m;
  m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
       numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
  JacobiRotation<RealScalar> rot1;
  RealScalar t = m.coeff(0,0) + m.coeff(1,1);
  RealScalar d = m.coeff(1,0) - m.coeff(0,1);
  
  if(d == RealScalar(0))
  {
    rot1.s() = RealScalar(0);
    rot1.c() = RealScalar(1);
  }
  else
  {
    // If d!=0, then t/d cannot overflow because the magnitude of the
    // entries forming d are not too small compared to the ones forming t.
    RealScalar u = t / d;
    RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u));
    rot1.s() = RealScalar(1) / tmp;
    rot1.c() = u / tmp;
  }
  m.applyOnTheLeft(0,1,rot1);
  j_right->makeJacobi(m,0,1);
  *j_left = rot1 * j_right->transpose();
}
template<typename InputIterator , typename SparseMatrixType , typename DupFunctor >
void Eigen::internal::set_from_triplets ( const InputIterator &  begin,
const InputIterator &  end,
SparseMatrixType &  mat,
DupFunctor  dup_func 
)

Definition at line 907 of file SparseMatrix.h.

{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the nnz per inner-vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3:
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}
template<typename T >
EIGEN_DEVICE_FUNC void Eigen::internal::smart_copy ( const T start,
const T end,
T target 
)

Definition at line 482 of file Memory.h.

{
  smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
}
template<typename T >
void Eigen::internal::smart_memmove ( const T start,
const T end,
T target 
)

Definition at line 505 of file Memory.h.

{
  smart_memmove_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
}
template<typename Decomposition , typename Rhs , typename Dest >
void Eigen::internal::solve_sparse_through_dense_panels ( const Decomposition &  dec,
const Rhs &  rhs,
Dest &  dest 
)

Helper functions to solve with a sparse right-hand-side and result. The rhs is decomposed into small vertical panels which are solved through dense temporaries.

Definition at line 22 of file SparseSolverBase.h.

{
  EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
  typedef typename Dest::Scalar DestScalar;
  // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
  static const Index NbColsAtOnce = 4;
  Index rhsCols = rhs.cols();
  Index size = rhs.rows();
  // the temporary matrices do not need more columns than NbColsAtOnce:
  Index tmpCols = (std::min)(rhsCols, NbColsAtOnce); 
  Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);
  Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);
  for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
  {
    Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
    tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
    tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
    dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
  }
}
template<int Mode, typename SparseLhsType , typename DenseRhsType , typename DenseResType , typename AlphaType >
void Eigen::internal::sparse_selfadjoint_time_dense_product ( const SparseLhsType &  lhs,
const DenseRhsType &  rhs,
DenseResType &  res,
const AlphaType &  alpha 
) [inline]

Definition at line 250 of file SparseSelfAdjointView.h.

{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);
  // TODO use alpha
  eigen_assert(alpha==AlphaType(1) && "alpha != 1 is not implemented yet, sorry");
  
  typedef evaluator<SparseLhsType> LhsEval;
  typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;
  
  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    ProcessFirstHalf =
              ((Mode&(Upper|Lower))==(Upper|Lower))
          || ( (Mode&Upper) && !LhsIsRowMajor)
          || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };
  
  LhsEval lhsEval(lhs);
  
  for (Index j=0; j<lhs.outerSize(); ++j)
  {
    LhsIterator i(lhsEval,j);
    if (ProcessSecondHalf)
    {
      while (i && i.index()<j) ++i;
      if(i && i.index()==j)
      {
        res.row(j) += i.value() * rhs.row(j);
        ++i;
      }
    }
    for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
    {
      Index a = LhsIsRowMajor ? j : i.index();
      Index b = LhsIsRowMajor ? i.index() : j;
      LhsScalar v = i.value();
      res.row(a) += (v) * rhs.row(b);
      res.row(b) += numext::conj(v) * rhs.row(a);
    }
    if (ProcessFirstHalf && i && (i.index()==j))
      res.row(j) += i.value() * rhs.row(j);
  }
}
template<typename Lhs , typename Rhs , typename ResultType >
static void Eigen::internal::sparse_sparse_product_with_pruning_impl ( const Lhs &  lhs,
const Rhs &  rhs,
ResultType &  res,
const typename ResultType::RealScalar &  tolerance 
) [static]

Definition at line 20 of file SparseSparseProductWithPruning.h.

{
  // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);

  typedef typename remove_all<Lhs>::type::Scalar Scalar;
  typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;

  // make sure to call innerSize/outerSize since we fake the storage order.
  Index rows = lhs.innerSize();
  Index cols = rhs.outerSize();
  //Index size = lhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());

  // allocate a temporary buffer
  AmbiVector<Scalar,StorageIndex> tempVector(rows);

  // mimics a resizeByInnerOuter:
  if(ResultType::IsRowMajor)
    res.resize(cols, rows);
  else
    res.resize(rows, cols);
  
  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);
  
  // estimate the number of non zero entries
  // given a rhs column containing Y non zeros, we assume that the respective Y columns
  // of the lhs differs in average of one non zeros, thus the number of non zeros for
  // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
  // per column of the lhs.
  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
  Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();

  res.reserve(estimated_nnz_prod);
  double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
  for (Index j=0; j<cols; ++j)
  {
    // FIXME:
    //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
    // let's do a more accurate determination of the nnz ratio for the current column j of res
    tempVector.init(ratioColRes);
    tempVector.setZero();
    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
    {
      // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
      tempVector.restart();
      Scalar x = rhsIt.value();
      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
      {
        tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
      }
    }
    res.startVec(j);
    for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
      res.insertBackByOuterInner(j,it.index()) = it.value();
  }
  res.finalize();
}
template<typename Lhs , typename Rhs , typename ResultType >
static void Eigen::internal::sparse_sparse_to_dense_product_impl ( const Lhs &  lhs,
const Rhs &  rhs,
ResultType &  res 
) [static]

Definition at line 264 of file ConservativeSparseSparseProduct.h.

{
  typedef typename remove_all<Lhs>::type::Scalar Scalar;
  Index cols = rhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());

  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);

  for (Index j=0; j<cols; ++j)
  {
    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
    {
      Scalar y = rhsIt.value();
      Index k = rhsIt.index();
      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
      {
        Index i = lhsIt.index();
        Scalar x = lhsIt.value();
        res.coeffRef(i,j) += x * y;
      }
    }
  }
}
template<typename SparseLhsType , typename DenseRhsType , typename DenseResType , typename AlphaType >
void Eigen::internal::sparse_time_dense_product ( const SparseLhsType &  lhs,
const DenseRhsType &  rhs,
DenseResType &  res,
const AlphaType &  alpha 
) [inline]

Definition at line 145 of file SparseDenseProduct.h.

{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
template<typename Scalar >
EIGEN_DONT_INLINE void Eigen::internal::sparselu_gemm ( Index  m,
Index  n,
Index  d,
const Scalar *  A,
Index  lda,
const Scalar *  B,
Index  ldb,
Scalar *  C,
Index  ldc 
)

A general matrix-matrix product kernel optimized for the SparseLU factorization.

  • A, B, and C must be column major
  • lda and ldc must be multiples of the respective packet size
  • C must have the same alignment as A

Definition at line 26 of file SparseLU_gemm_kernel.h.

{
  using namespace Eigen::internal;
  
  typedef typename packet_traits<Scalar>::type Packet;
  enum {
    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    PacketSize = packet_traits<Scalar>::size,
    PM = 8,                             // peeling in M
    RN = 2,                             // register blocking
    RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
    BM = 4096/sizeof(Scalar),           // number of rows of A-C per chunk
    SM = PM*PacketSize                  // step along M
  };
  Index d_end = (d/RK)*RK;    // number of columns of A (rows of B) suitable for full register blocking
  Index n_end = (n/RN)*RN;    // number of columns of B-C suitable for processing RN columns at once
  Index i0 = internal::first_default_aligned(A,m);
  
  eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m)));
  
  // handle the non aligned rows of A and C without any optimization:
  for(Index i=0; i<i0; ++i)
  {
    for(Index j=0; j<n; ++j)
    {
      Scalar c = C[i+j*ldc];
      for(Index k=0; k<d; ++k)
        c += B[k+j*ldb] * A[i+k*lda];
      C[i+j*ldc] = c;
    }
  }
  // process the remaining rows per chunk of BM rows
  for(Index ib=i0; ib<m; ib+=BM)
  {
    Index actual_b = std::min<Index>(BM, m-ib);                 // actual number of rows
    Index actual_b_end1 = (actual_b/SM)*SM;                   // actual number of rows suitable for peeling
    Index actual_b_end2 = (actual_b/PacketSize)*PacketSize;   // actual number of rows suitable for vectorization
    
    // Let's process two columns of B-C at once
    for(Index j=0; j<n_end; j+=RN)
    {
      const Scalar* Bc0 = B+(j+0)*ldb;
      const Scalar* Bc1 = B+(j+1)*ldb;
      
      for(Index k=0; k<d_end; k+=RK)
      {
        
        // load and expand a RN x RK block of B
        Packet b00, b10, b20, b30, b01, b11, b21, b31;
                  b00 = pset1<Packet>(Bc0[0]);
                  b10 = pset1<Packet>(Bc0[1]);
        if(RK==4) b20 = pset1<Packet>(Bc0[2]);
        if(RK==4) b30 = pset1<Packet>(Bc0[3]);
                  b01 = pset1<Packet>(Bc1[0]);
                  b11 = pset1<Packet>(Bc1[1]);
        if(RK==4) b21 = pset1<Packet>(Bc1[2]);
        if(RK==4) b31 = pset1<Packet>(Bc1[3]);
        
        Packet a0, a1, a2, a3, c0, c1, t0, t1;
        
        const Scalar* A0 = A+ib+(k+0)*lda;
        const Scalar* A1 = A+ib+(k+1)*lda;
        const Scalar* A2 = A+ib+(k+2)*lda;
        const Scalar* A3 = A+ib+(k+3)*lda;
        
        Scalar* C0 = C+ib+(j+0)*ldc;
        Scalar* C1 = C+ib+(j+1)*ldc;
        
                  a0 = pload<Packet>(A0);
                  a1 = pload<Packet>(A1);
        if(RK==4)
        {
          a2 = pload<Packet>(A2);
          a3 = pload<Packet>(A3);
        }
        else
        {
          // workaround "may be used uninitialized in this function" warning
          a2 = a3 = a0;
        }
        
#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
#define WORK(I)  \
                    c0 = pload<Packet>(C0+i+(I)*PacketSize);   \
                    c1 = pload<Packet>(C1+i+(I)*PacketSize);   \
                    KMADD(c0, a0, b00, t0)      \
                    KMADD(c1, a0, b01, t1)      \
                    a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
                    KMADD(c0, a1, b10, t0)      \
                    KMADD(c1, a1, b11, t1)       \
                    a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
          if(RK==4) KMADD(c0, a2, b20, t0)       \
          if(RK==4) KMADD(c1, a2, b21, t1)       \
          if(RK==4) a2 = pload<Packet>(A2+i+(I+1)*PacketSize); \
          if(RK==4) KMADD(c0, a3, b30, t0)       \
          if(RK==4) KMADD(c1, a3, b31, t1)       \
          if(RK==4) a3 = pload<Packet>(A3+i+(I+1)*PacketSize); \
                    pstore(C0+i+(I)*PacketSize, c0);           \
                    pstore(C1+i+(I)*PacketSize, c1)
        
        // process rows of A' - C' with aggressive vectorization and peeling 
        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
        {
          EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
                    prefetch((A0+i+(5)*PacketSize));
                    prefetch((A1+i+(5)*PacketSize));
          if(RK==4) prefetch((A2+i+(5)*PacketSize));
          if(RK==4) prefetch((A3+i+(5)*PacketSize));
                    WORK(0);
                    WORK(1);
                    WORK(2);
                    WORK(3);
                    WORK(4);
                    WORK(5);
                    WORK(6);
                    WORK(7);
        }
        // process the remaining rows with vectorization only
        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
        {
          WORK(0);
        }
#undef WORK
        // process the remaining rows without vectorization
        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4)
          {
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
          }
          else
          {
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
          }
        }
        
        Bc0 += RK;
        Bc1 += RK;
      } // peeled loop on k
    } // peeled loop on the columns j
    // process the last column (we now perform a matrix-vector product)
    if((n-n_end)>0)
    {
      const Scalar* Bc0 = B+(n-1)*ldb;
      
      for(Index k=0; k<d_end; k+=RK)
      {
        
        // load and expand a 1 x RK block of B
        Packet b00, b10, b20, b30;
                  b00 = pset1<Packet>(Bc0[0]);
                  b10 = pset1<Packet>(Bc0[1]);
        if(RK==4) b20 = pset1<Packet>(Bc0[2]);
        if(RK==4) b30 = pset1<Packet>(Bc0[3]);
        
        Packet a0, a1, a2, a3, c0, t0/*, t1*/;
        
        const Scalar* A0 = A+ib+(k+0)*lda;
        const Scalar* A1 = A+ib+(k+1)*lda;
        const Scalar* A2 = A+ib+(k+2)*lda;
        const Scalar* A3 = A+ib+(k+3)*lda;
        
        Scalar* C0 = C+ib+(n_end)*ldc;
        
                  a0 = pload<Packet>(A0);
                  a1 = pload<Packet>(A1);
        if(RK==4)
        {
          a2 = pload<Packet>(A2);
          a3 = pload<Packet>(A3);
        }
        else
        {
          // workaround "may be used uninitialized in this function" warning
          a2 = a3 = a0;
        }
        
#define WORK(I) \
                  c0 = pload<Packet>(C0+i+(I)*PacketSize);   \
                  KMADD(c0, a0, b00, t0)       \
                  a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
                  KMADD(c0, a1, b10, t0)       \
                  a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
        if(RK==4) KMADD(c0, a2, b20, t0)       \
        if(RK==4) a2 = pload<Packet>(A2+i+(I+1)*PacketSize); \
        if(RK==4) KMADD(c0, a3, b30, t0)       \
        if(RK==4) a3 = pload<Packet>(A3+i+(I+1)*PacketSize); \
                  pstore(C0+i+(I)*PacketSize, c0);
        
        // agressive vectorization and peeling
        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
        {
          EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
          WORK(0);
          WORK(1);
          WORK(2);
          WORK(3);
          WORK(4);
          WORK(5);
          WORK(6);
          WORK(7);
        }
        // vectorization only
        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
        {
          WORK(0);
        }
        // remaining scalars
        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4) 
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
          else
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
        }
        
        Bc0 += RK;
#undef WORK
      }
    }
    
    // process the last columns of A, corresponding to the last rows of B
    Index rd = d-d_end;
    if(rd>0)
    {
      for(Index j=0; j<n; ++j)
      {
        enum {
          Alignment = PacketSize>1 ? Aligned : 0
        };
        typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
        typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
        if(rd==1)       MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);
        
        else if(rd==2)  MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);
        
        else            MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
                                                        + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
      }
    }
  
  } // blocking on the rows of A and C
}
template<typename ExpressionType , typename Scalar >
void Eigen::internal::stable_norm_kernel ( const ExpressionType &  bl,
Scalar &  ssq,
Scalar &  scale,
Scalar &  invScale 
) [inline]

Definition at line 18 of file StableNorm.h.

{
  Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
  
  if(maxCoeff>scale)
  {
    ssq = ssq * numext::abs2(scale/maxCoeff);
    Scalar tmp = Scalar(1)/maxCoeff;
    if(tmp > NumTraits<Scalar>::highest())
    {
      invScale = NumTraits<Scalar>::highest();
      scale = Scalar(1)/invScale;
    }
    else if(maxCoeff>NumTraits<Scalar>::highest()) // we got a INF
    {
      invScale = Scalar(1);
      scale = maxCoeff;
    }
    else
    {
      scale = maxCoeff;
      invScale = tmp;
    }
  }
  else if(maxCoeff!=maxCoeff) // we got a NaN
  {
    scale = maxCoeff;
  }
  
  // TODO if the maxCoeff is much much smaller than the current scale,
  // then we can neglect this sub vector
  if(scale>Scalar(0)) // if scale==0, then bl is 0 
    ssq += (bl*invScale).squaredNorm();
}
template<typename T >
void Eigen::internal::swap ( scoped_array< T > &  a,
scoped_array< T > &  b 
)

Definition at line 599 of file Memory.h.

{
  std::swap(a.ptr(),b.ptr());
}
EIGEN_DEVICE_FUNC void Eigen::internal::throw_std_bad_alloc ( ) [inline]

Definition at line 67 of file Memory.h.

{
  #ifdef EIGEN_EXCEPTIONS
    throw std::bad_alloc();
  #else
    std::size_t huge = static_cast<std::size_t>(-1);
    new int[huge];
  #endif
}
template<typename Scalar , int Dim>
static Matrix<Scalar,2,2> Eigen::internal::toRotationMatrix ( const Scalar &  s) [inline, static]

Helper function to return an arbitrary rotation object to a rotation matrix.

Template Parameters:
Scalar — the numeric type of the matrix coefficients
Dim — the dimension of the current space

It returns a Dim x Dim fixed size matrix.

Default specializations are provided for:

Currently toRotationMatrix is only used by Transform.

See also:
class Transform, class Rotation2D, class Quaternion, class AngleAxis

Definition at line 182 of file RotationBase.h.

{
  EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
  return Rotation2D<Scalar>(s).toRotationMatrix();
}
template<typename Scalar , int Dim, typename OtherDerived >
static Matrix<Scalar,Dim,Dim> Eigen::internal::toRotationMatrix ( const RotationBase< OtherDerived, Dim > &  r) [inline, static]

Definition at line 189 of file RotationBase.h.

{
  return r.toRotationMatrix();
}
template<typename Scalar , int Dim, typename OtherDerived >
static const MatrixBase<OtherDerived>& Eigen::internal::toRotationMatrix ( const MatrixBase< OtherDerived > &  mat) [inline, static]

Definition at line 195 of file RotationBase.h.

{
  EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
    YOU_MADE_A_PROGRAMMING_MISTAKE)
  return mat;
}
template<typename IndexVector >
void Eigen::internal::treePostorder ( typename IndexVector::Scalar  n,
IndexVector &  parent,
IndexVector &  post 
)

Post order a tree.

Parameters:
n — the number of nodes
parent — Input tree
post — postordered tree

Definition at line 178 of file SparseColEtree.h.

{
  typedef typename IndexVector::Scalar StorageIndex;
  IndexVector first_kid, next_kid; // Linked list of children 
  StorageIndex postnum; 
  // Allocate storage for working arrays and results 
  first_kid.resize(n+1); 
  next_kid.setZero(n+1);
  post.setZero(n+1);
  
  // Set up structure describing children
  first_kid.setConstant(-1); 
  for (StorageIndex v = n-1; v >= 0; v--) 
  {
    StorageIndex dad = parent(v);
    next_kid(v) = first_kid(dad); 
    first_kid(dad) = v; 
  }
  
  // Depth-first search from dummy root vertex #n
  postnum = 0; 
  internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
}
template<int StorageOrder, typename RealScalar , typename Scalar , typename Index >
static EIGEN_DEVICE_FUNC void Eigen::internal::tridiagonal_qr_step ( RealScalar *  diag,
RealScalar *  subdiag,
Index  start,
Index  end,
Scalar *  matrixQ,
Index  n 
) [static]

Performs a QR step on a tridiagonal symmetric matrix represented as a pair of two vectors diag and subdiag.

Parameters:
diag — the diagonal part of the input selfadjoint tridiagonal matrix
subdiag — the sub-diagonal part of the input selfadjoint tridiagonal matrix
start — starting index of the submatrix to work on
end — last+1 index of the submatrix to work on
matrixQ — pointer to the column-major matrix holding the eigenvectors; can be 0
n — size of the input matrix

For compilation efficiency reasons, this procedure does not use eigen expression for its arguments.

Implemented from Golub's "Matrix Computations", algorithm 8.3.2: "implicit symmetric QR step with Wilkinson shift"

Definition at line 801 of file SelfAdjointEigenSolver.h.

{
  // One implicit symmetric QR step with Wilkinson shift (Golub & Van Loan,
  // "Matrix Computations", Alg. 8.3.2) applied to the submatrix
  // [start, end] of the tridiagonal pair (diag, subdiag).
  using std::abs;
  RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
  RealScalar e = subdiag[end-1];
  // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
  // underflow thus leading to inf/NaN values when using the following commented code:
//   RealScalar e2 = numext::abs2(subdiag[end-1]);
//   RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
  // This explains the following, somewhat more complicated, version:
  RealScalar mu = diag[end];
  if(td==0)
    mu -= abs(e);
  else
  {
    // Underflow-safe rewriting of the Wilkinson shift formula above.
    RealScalar e2 = numext::abs2(subdiag[end-1]);
    RealScalar h = numext::hypot(td,e);
    if(e2==0)  mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
    else       mu -= e2 / (td + (td>0 ? h : -h));
  }
  
  RealScalar x = diag[start] - mu;
  RealScalar z = subdiag[start];
  // Chase the bulge down the tridiagonal with a sequence of Givens rotations;
  // (x, z) track the pair of entries the next rotation must annihilate.
  for (Index k = start; k < end; ++k)
  {
    JacobiRotation<RealScalar> rot;
    rot.makeGivens(x, z);

    // do T = G' T G
    RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
    RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];

    diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
    diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
    subdiag[k] = rot.c() * sdk - rot.s() * dkp1;
    

    if (k > start)
      subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;

    x = subdiag[k];

    if (k < end - 1)
    {
      z = -rot.s() * subdiag[k+1];
      subdiag[k + 1] = rot.c() * subdiag[k+1];
    }
    
    // apply the givens rotation to the unit matrix Q = Q * G
    if (matrixQ)
    {
      // FIXME if StorageOrder == RowMajor this operation is not very efficient
      Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
      q.applyOnTheRight(k,k+1,rot);
    }
  }
}
template<typename MatrixType , typename CoeffVectorType >
void Eigen::internal::tridiagonalization_inplace ( MatrixType &  matA,
CoeffVectorType &  hCoeffs 
)

Performs a tridiagonal decomposition of the selfadjoint matrix matA in-place.

Parameters:
[in,out]  matA     On input the selfadjoint matrix. Only the lower triangular part is referenced. On output, the strict upper part is left unchanged, and the lower triangular part represents the T and Q matrices in packed format as detailed below.
[out]     hCoeffs  returned Householder coefficients (see below)

On output, the tridiagonal selfadjoint matrix T is stored in the diagonal and lower sub-diagonal of the matrix matA. The unitary matrix Q is represented in a compact way as a product of Householder reflectors $ H_i $ such that: $ Q = H_{N-1} \ldots H_1 H_0 $. The Householder reflectors are defined as $ H_i = (I - h_i v_i v_i^T) $ where $ h_i = hCoeffs[i]$ is the $ i $th Householder coefficient and $ v_i $ is the Householder vector defined by $ v_i = [ 0, \ldots, 0, 1, matA(i+2,i), \ldots, matA(N-1,i) ]^T $.

Implemented from Golub's "Matrix Computations", algorithm 8.3.1.

See also:
Tridiagonalization::packedMatrix()

Definition at line 347 of file Tridiagonalization.h.

{
  // Householder tridiagonalization (Golub & Van Loan, Alg. 8.3.1): at step i
  // a reflector annihilates the entries of column i below the sub-diagonal,
  // and the similarity transform A = H A H' is applied to the trailing
  // block via a symmetric rank-2 update.
  using numext::conj;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  Index n = matA.rows();
  eigen_assert(n==matA.cols());
  eigen_assert(n==hCoeffs.size()+1 || n==1);
  
  for (Index i = 0; i<n-1; ++i)
  {
    Index remainingSize = n-i-1;
    RealScalar beta;
    Scalar h;
    matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);

    // Apply similarity transformation to remaining columns,
    // i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
    // Temporarily set the leading entry of v to 1 (it holds beta otherwise).
    matA.col(i).coeffRef(i+1) = 1;

    // hCoeffs.tail(...) first holds p = conj(h) * A * v ...
    hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()
                                  * (conj(h) * matA.col(i).tail(remainingSize)));

    // ... then w = p - (conj(h)/2) (p . v) v, so the trailing block can be
    // updated with the rank-2 form A -= v w' + w v' below.
    hCoeffs.tail(n-i-1) += (conj(h)*Scalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);

    matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()
      .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), -1);

    // Restore the sub-diagonal entry and stash the Householder coefficient
    // so Q can be reconstructed later from the packed representation.
    matA.col(i).coeffRef(i+1) = beta;
    hCoeffs.coeffRef(i) = h;
  }
}
template<typename MatrixType , typename DiagonalType , typename SubDiagonalType >
void Eigen::internal::tridiagonalization_inplace ( MatrixType &  mat,
DiagonalType &  diag,
SubDiagonalType &  subdiag,
bool  extractQ 
)

Performs a full tridiagonalization in place.

Parameters:
[in,out]  mat       On input, the selfadjoint matrix whose tridiagonal decomposition is to be computed. Only the lower triangular part is referenced. The rest is left unchanged. On output, the orthogonal matrix Q in the decomposition if extractQ is true.
[out]     diag      The diagonal of the tridiagonal matrix T in the decomposition.
[out]     subdiag   The subdiagonal of the tridiagonal matrix T in the decomposition.
[in]      extractQ  If true, the orthogonal matrix Q in the decomposition is computed and stored in mat.

Computes the tridiagonal decomposition of the selfadjoint matrix mat in place such that $ mat = Q T Q^* $ where $ Q $ is unitary and $ T $ a real symmetric tridiagonal matrix.

The tridiagonal matrix T is passed to the output parameters diag and subdiag. If extractQ is true, then the orthogonal matrix Q is passed to mat. Otherwise the lower part of the matrix mat is destroyed.

The vectors diag and subdiag are not resized. The function assumes that they are already of the correct size. The length of the vector diag should equal the number of rows in mat, and the length of the vector subdiag should be one less.

This implementation contains an optimized path for 3-by-3 matrices which is especially useful for plane fitting.

Note:
Currently, it requires two temporary vectors to hold the intermediate Householder coefficients, and to reconstruct the matrix Q from the Householder reflectors.

Example (this uses the same matrix as the example in Tridiagonalization::Tridiagonalization(const MatrixType&)):

Output:

See also:
class Tridiagonalization

Definition at line 427 of file Tridiagonalization.h.

{
  // Check that diag/subdiag were pre-sized by the caller (this function does
  // not resize them), then dispatch to a size-specialized implementation
  // (an optimized path exists for 3x3 matrices).
  eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
  tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
}
template<typename MatrixType >
void Eigen::internal::upperbidiagonalization_blocked_helper ( MatrixType &  A,
typename MatrixType::RealScalar *  diagonal,
typename MatrixType::RealScalar *  upper_diagonal,
Index  bs,
Ref< Matrix< typename MatrixType::Scalar, Dynamic, Dynamic, traits< MatrixType >::Flags &RowMajorBit > >  X,
Ref< Matrix< typename MatrixType::Scalar, Dynamic, Dynamic, traits< MatrixType >::Flags &RowMajorBit > >  Y 
)

Helper routine for the block reduction to upper bidiagonal form.

Let's partition the matrix A:

        | A00 A01 |
    A = |         |
        | A10 A11 |

This function reduces to bidiagonal form the left rows x blockSize vertical panel [A00/A10] and the blockSize x cols horizontal panel [A00 A01] of the matrix A. The bottom-right block A11 is updated using matrix-matrix products: A22 -= V * Y^T - X * U^T where V and U contains the left and right Householder vectors. U and V are stored in A10, and A01 respectively, and the update matrices X and Y are computed during the reduction.

Definition at line 152 of file UpperBidiagonalization.h.

{
  // Panel reduction for the blocked bidiagonalization: reduces the first
  // bs columns/rows of A while accumulating the update matrices X and Y,
  // so the trailing block A22 can be updated with two GEMMs at the end.
  typedef typename MatrixType::Scalar Scalar;
  enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
  typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
  typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
  typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride>    SubColumnType;
  typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride>    SubRowType;
  typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;
  
  Index brows = A.rows();
  Index bcols = A.cols();

  Scalar tau_u, tau_u_prev(0), tau_v;

  for(Index k = 0; k < bs; ++k)
  {
    Index remainingRows = brows - k;
    Index remainingCols = bcols - k - 1;

    // Previously accumulated update columns (X_k1) and Householder vectors (V_k1).
    SubMatType X_k1( X.block(k,0, remainingRows,k) );
    SubMatType V_k1( A.block(k,0, remainingRows,k) );

    // 1 - update the k-th column of A
    SubColumnType v_k = A.col(k).tail(remainingRows);
          v_k -= V_k1 * Y.row(k).head(k).adjoint();
    if(k) v_k -= X_k1 * A.col(k).head(k);
    
    // 2 - construct left Householder transform in-place
    v_k.makeHouseholderInPlace(tau_v, diagonal[k]);
       
    if(k+1<bcols)
    {
      SubMatType Y_k  ( Y.block(k+1,0, remainingCols, k+1) );
      SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );
      
      // this eases the application of Householder transformations
      // A(k,k) will store tau_v later
      A(k,k) = Scalar(1);

      // 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
      {
        SubColumnType y_k( Y.col(k).tail(remainingCols) );
        
        // let's use the beginning of column k of Y as a temporary vector
        SubColumnType tmp( Y.col(k).head(k) );
        y_k.noalias()  = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
        tmp.noalias()  = V_k1.adjoint()  * v_k;
        y_k.noalias() -= Y_k.leftCols(k) * tmp;
        tmp.noalias()  = X_k1.adjoint()  * v_k;
        y_k.noalias() -= U_k1.adjoint()  * tmp;
        y_k *= numext::conj(tau_v);
      }

      // 4 - update k-th row of A (it will become u_k)
      SubRowType u_k( A.row(k).tail(remainingCols) );
      u_k = u_k.conjugate();
      {
        u_k -= Y_k * A.row(k).head(k+1).adjoint();
        if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
      }

      // 5 - construct right Householder transform in-place
      u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);

      // this eases the application of Householder transformations
      // A(k,k+1) will store tau_u later
      A(k,k+1) = Scalar(1);

      // 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
      {
        SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
        
        // let's use the beginning of column k of X as temporary vectors
        // note that tmp0 and tmp1 overlap
        SubColumnType tmp0 ( X.col(k).head(k) ),
                      tmp1 ( X.col(k).head(k+1) );
                    
        x_k.noalias()   = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck
        tmp0.noalias()  = U_k1 * u_k.transpose();
        x_k.noalias()  -= X_k1.bottomRows(remainingRows-1) * tmp0;
        tmp1.noalias()  = Y_k.adjoint() * u_k.transpose();
        x_k.noalias()  -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;
        x_k *= numext::conj(tau_u);
        tau_u = numext::conj(tau_u);
        u_k = u_k.conjugate();
      }

      // Write back the previous tau_u now that the super-diagonal slot it
      // occupies is no longer needed by the computation above.
      if(k>0) A.coeffRef(k-1,k) = tau_u_prev;
      tau_u_prev = tau_u;
    }
    else
      A.coeffRef(k-1,k) = tau_u_prev;

    // The diagonal entry stores tau_v (the actual value went to diagonal[k]).
    A.coeffRef(k,k) = tau_v;
  }
  
  if(bs<bcols)
    A.coeffRef(bs-1,bs) = tau_u_prev;

  // update A22
  if(bcols>bs && brows>bs)
  {
    SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );
    SubMatType A10( A.block(bs,0, brows-bs,bs) );
    SubMatType A01( A.block(0,bs, bs,bcols-bs) );
    // Temporarily restore the implicit 1 of the last Householder vector.
    Scalar tmp = A01(bs-1,0);
    A01(bs-1,0) = 1;
    // Two GEMM updates of the trailing block using the accumulated
    // Householder vectors (stored in A10/A01) and update matrices X, Y.
    A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
    A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
    A01(bs-1,0) = tmp;
  }
}
template<typename MatrixType , typename BidiagType >
void Eigen::internal::upperbidiagonalization_inplace_blocked ( MatrixType &  A,
BidiagType &  bidiagonal,
Index  maxBlockSize = 32,
typename MatrixType::Scalar *  = 0 
)

Implementation of a block-bidiagonal reduction. It is based on the following paper: The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form. by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995) section 3.3

Definition at line 282 of file UpperBidiagonalization.h.

{
  // Blocked upper bidiagonalization (Choi, Dongarra, Walker 1995, Sec. 3.3):
  // the matrix is processed in panels of at most maxBlockSize columns; each
  // panel is reduced by the blocked helper, which also updates the trailing
  // block with matrix-matrix products.
  typedef typename MatrixType::Scalar Scalar;
  typedef Block<MatrixType,Dynamic,Dynamic> BlockType;

  Index rows = A.rows();
  Index cols = A.cols();
  Index size = (std::min)(rows, cols);

  // X and Y are work space
  enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
  Matrix<Scalar,
         MatrixType::RowsAtCompileTime,
         Dynamic,
         StorageOrder,
         MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
  Matrix<Scalar,
         MatrixType::ColsAtCompileTime,
         Dynamic,
         StorageOrder,
         MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
  Index blockSize = (std::min)(maxBlockSize,size);

  Index k = 0;
  for(k = 0; k < size; k += blockSize)
  {
    Index bs = (std::min)(size-k,blockSize);  // actual size of the block
    Index brows = rows - k;                   // rows of the block
    Index bcols = cols - k;                   // columns of the block

    // partition the matrix A:
    // 
    //      | A00 A01 A02 |
    //      |             |
    // A  = | A10 A11 A12 |
    //      |             |
    //      | A20 A21 A22 |
    //
    // where A11 is a bs x bs diagonal block,
    // and let:
    //      | A11 A12 |
    //  B = |         |
    //      | A21 A22 |

    BlockType B = A.block(k,k,brows,bcols);
    
    // This stage performs the bidiagonalization of A11, A21, A12, and updating of A22.
    // Finally, the algorithm continues on the updated A22.
    //
    // However, if B is too small, or A22 empty, then let's use an unblocked strategy
    if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
    {
      // X is large enough to serve as the scratch buffer of the unblocked path.
      upperbidiagonalization_inplace_unblocked(B,
                                               &(bidiagonal.template diagonal<0>().coeffRef(k)),
                                               &(bidiagonal.template diagonal<1>().coeffRef(k)),
                                               X.data()
                                              );
      break; // We're done
    }
    else
    {
      upperbidiagonalization_blocked_helper<BlockType>( B,
                                                        &(bidiagonal.template diagonal<0>().coeffRef(k)),
                                                        &(bidiagonal.template diagonal<1>().coeffRef(k)),
                                                        bs,
                                                        X.topLeftCorner(brows,bs),
                                                        Y.topLeftCorner(bcols,bs)
                                                      );
    }
  }
}
template<typename MatrixType >
void Eigen::internal::upperbidiagonalization_inplace_unblocked ( MatrixType &  mat,
typename MatrixType::RealScalar *  diagonal,
typename MatrixType::RealScalar *  upper_diagonal,
typename MatrixType::Scalar *  tempData = 0 
)

Definition at line 93 of file UpperBidiagonalization.h.

{
  // Unblocked Golub-Kahan upper bidiagonalization: alternately applies a
  // left Householder reflector (zeroing below the diagonal of column k)
  // and a right reflector (zeroing right of the super-diagonal of row k).
  typedef typename MatrixType::Scalar Scalar;

  Index rows = mat.rows();
  Index cols = mat.cols();

  typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;
  TempType tempVector;
  if(tempData==0)
  {
    // No caller-provided scratch buffer: allocate one locally.
    tempVector.resize(rows);
    tempData = tempVector.data();
  }

  for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
  {
    Index remainingRows = rows - k;
    Index remainingCols = cols - k - 1;

    // construct left householder transform in-place in A
    mat.col(k).tail(remainingRows)
       .makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
    // apply householder transform to remaining part of A on the left
    mat.bottomRightCorner(remainingRows, remainingCols)
       .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);

    if(k == cols-1) break;

    // construct right householder transform in-place in mat
    mat.row(k).tail(remainingCols)
       .makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
    // apply householder transform to remaining part of mat on the right
    mat.bottomRightCorner(remainingRows-1, remainingCols)
       .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
  }
}
bool Eigen::internal::useSpecificBlockingSizes ( Index &  k,
Index &  m,
Index &  n 
) [inline]

Definition at line 266 of file GeneralBlockPanelKernel.h.

{
  // Test-suite hook: when EIGEN_TEST_SPECIFIC_BLOCKING_SIZES is defined
  // (and evaluates to true), clamp the GEMM blocking sizes k/m/n to the
  // test-provided limits and return true to signal they were overridden.
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
    k = std::min<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
    m = std::min<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
    n = std::min<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
    return true;
  }
#else
  // Normal build: silence unused-parameter warnings.
  EIGEN_UNUSED_VARIABLE(k)
  EIGEN_UNUSED_VARIABLE(m)
  EIGEN_UNUSED_VARIABLE(n)
#endif
  return false;
}

Variable Documentation

const std::ptrdiff_t Eigen::internal::defaultL1CacheSize = 16*1024

Definition at line 33 of file GeneralBlockPanelKernel.h.

const std::ptrdiff_t Eigen::internal::defaultL2CacheSize = 512*1024

Definition at line 34 of file GeneralBlockPanelKernel.h.

const std::ptrdiff_t Eigen::internal::defaultL3CacheSize = 512*1024

Definition at line 35 of file GeneralBlockPanelKernel.h.

Packet16uc Eigen::internal::p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 } [static]

Definition at line 84 of file AltiVec/PacketMath.h.

Packet16uc Eigen::internal::p16uc_HALF64_0_16 = vec_sld(vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 0), (Packet16uc)p4i_ZERO, 8) [static]

Definition at line 108 of file AltiVec/PacketMath.h.

Definition at line 107 of file AltiVec/PacketMath.h.

Definition at line 106 of file AltiVec/PacketMath.h.

Packet16uc Eigen::internal::p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 } [static]

Definition at line 83 of file AltiVec/PacketMath.h.

Packet16uc Eigen::internal::p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 } [static]

Definition at line 105 of file AltiVec/PacketMath.h.

uint32x2_t Eigen::internal::p2ui_CONJ_XOR = EIGEN_INIT_NEON_PACKET2(0x00000000, 0x80000000) [static]

Definition at line 18 of file NEON/Complex.h.

Packet2ul Eigen::internal::p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8) [static]

Definition at line 22 of file AltiVec/Complex.h.

Packet2ul Eigen::internal::p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8) [static]

Definition at line 23 of file AltiVec/Complex.h.

Packet4f Eigen::internal::p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 } [static]

Definition at line 80 of file AltiVec/PacketMath.h.

Packet4f Eigen::internal::p4f_ONE = vec_ctf(p4i_ONE, 0) [static]

Definition at line 74 of file AltiVec/PacketMath.h.

Packet4f Eigen::internal::p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1) [static]

Definition at line 78 of file AltiVec/PacketMath.h.

Packet4i Eigen::internal::p4i_COUNTDOWN = { 0, 1, 2, 3 } [static]

Definition at line 81 of file AltiVec/PacketMath.h.

static uint32x4_t Eigen::internal::p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_ZERO_) [static]

Definition at line 17 of file AltiVec/Complex.h.

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines