Dataset schema (column, type, min, max):
  repo             stringlengths   1      152
  file             stringlengths   14     221
  code             stringlengths   501    25k
  file_length      int64           501    25k
  avg_line_length  float64         20     99.5
  max_line_length  int64           21     134
  extension_type   stringclasses   2 values
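The three numeric columns are derived from each source file. A minimal sketch of how they could be computed is shown below; this is an assumption about the dataset construction, not code taken from it (the function and struct names are illustrative), under the reading that avg_line_length is total characters divided by line count.

// Hypothetical sketch: one plausible derivation of the numeric columns.
// file_length     = total number of characters in the file
// avg_line_length = file_length divided by the number of lines
// max_line_length = length of the longest single line
#include <algorithm>
#include <fstream>
#include <string>

struct FileStats { long file_length; double avg_line_length; long max_line_length; };

inline FileStats compute_stats(const std::string& path) {
  std::ifstream in(path);
  FileStats s{0, 0.0, 0};
  long lines = 0;
  for (std::string line; std::getline(in, line); ) {
    ++lines;
    s.file_length += static_cast<long>(line.size()) + 1;  // +1 for the newline
    s.max_line_length = std::max<long>(s.max_line_length, static_cast<long>(line.size()));
  }
  s.avg_line_length = lines ? static_cast<double>(s.file_length) / lines : 0.0;
  return s;
}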
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/AVX/TypeCasting.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TYPE_CASTING_AVX_H #define EIGEN_TYPE_CASTING_AVX_H namespace Eigen { namespace internal { // For now we use SSE to handle integers, so we can't use AVX instructions to cast // from int to float template <> struct type_casting_traits<float, int> { enum { VectorizedCast = 0, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template <> struct type_casting_traits<int, float> { enum { VectorizedCast = 0, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet8i pcast<Packet8f, Packet8i>(const Packet8f& a) { return _mm256_cvtps_epi32(a); } template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8i, Packet8f>(const Packet8i& a) { return _mm256_cvtepi32_ps(a); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_TYPE_CASTING_AVX_H
file_length: 1,194 | avg_line_length: 21.980769 | max_line_length: 86 | extension_type: h
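A scalar sketch of what the two pcast specializations in the file above do per lane, offered only as an illustration (not Eigen code) and assuming the default MXCSR rounding mode (round to nearest even) is in effect:

// Per-lane equivalent of the vectorized casts (illustrative only).
// _mm256_cvtps_epi32 rounds according to the current rounding mode, which
// std::lrintf mirrors; _mm256_cvtepi32_ps converts each int32 lane to float.
#include <cmath>
#include <cstdint>

inline std::int32_t cast_lane_float_to_int(float x)        { return static_cast<std::int32_t>(std::lrintf(x)); }
inline float        cast_lane_int_to_float(std::int32_t x) { return static_cast<float>(x); }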
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/AVX512/MathFunctions.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Pedro Gonnet ([email protected]) // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_ #define THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_ namespace Eigen { namespace internal { // Disable the code for older versions of gcc that don't support many of the required avx512 instrinsics. #if EIGEN_GNUC_AT_LEAST(5, 3) #define _EIGEN_DECLARE_CONST_Packet16f(NAME, X) \ const Packet16f p16f_##NAME = pset1<Packet16f>(X) #define _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(NAME, X) \ const Packet16f p16f_##NAME = (__m512)pset1<Packet16i>(X) #define _EIGEN_DECLARE_CONST_Packet8d(NAME, X) \ const Packet8d p8d_##NAME = pset1<Packet8d>(X) #define _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(NAME, X) \ const Packet8d p8d_##NAME = _mm512_castsi512_pd(_mm512_set1_epi64(X)) // Natural logarithm // Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2) // and m is in the range [sqrt(1/2),sqrt(2)). In this range, the logarithm can // be easily approximated by a polynomial centered on m=1 for stability. #if defined(EIGEN_VECTORIZE_AVX512DQ) template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f plog<Packet16f>(const Packet16f& _x) { Packet16f x = _x; _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f); _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f); _EIGEN_DECLARE_CONST_Packet16f(126f, 126.0f); _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inv_mant_mask, ~0x7f800000); // The smallest non denormalized float number. _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(min_norm_pos, 0x00800000); _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(minus_inf, 0xff800000); _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000); // Polynomial coefficients. _EIGEN_DECLARE_CONST_Packet16f(cephes_SQRTHF, 0.707106781186547524f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p0, 7.0376836292E-2f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p1, -1.1514610310E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p2, 1.1676998740E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p3, -1.2420140846E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p4, +1.4249322787E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p5, -1.6668057665E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p6, +2.0000714765E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p7, -2.4999993993E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p8, +3.3333331174E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q1, -2.12194440e-4f); _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q2, 0.693359375f); // invalid_mask is set to true when x is NaN __mmask16 invalid_mask = _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_NGE_UQ); __mmask16 iszero_mask = _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_EQ_UQ); // Truncate input values to the minimum positive normal. x = pmax(x, p16f_min_norm_pos); // Extract the shifted exponents. Packet16f emm0 = _mm512_cvtepi32_ps(_mm512_srli_epi32((__m512i)x, 23)); Packet16f e = _mm512_sub_ps(emm0, p16f_126f); // Set the exponents to -1, i.e. x are in the range [0.5,1). x = _mm512_and_ps(x, p16f_inv_mant_mask); x = _mm512_or_ps(x, p16f_half); // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2)) // and shift by -1. 
The values are then centered around 0, which improves // the stability of the polynomial evaluation. // if( x < SQRTHF ) { // e -= 1; // x = x + x - 1.0; // } else { x = x - 1.0; } __mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ); Packet16f tmp = _mm512_mask_blend_ps(mask, x, _mm512_setzero_ps()); x = psub(x, p16f_1); e = psub(e, _mm512_mask_blend_ps(mask, p16f_1, _mm512_setzero_ps())); x = padd(x, tmp); Packet16f x2 = pmul(x, x); Packet16f x3 = pmul(x2, x); // Evaluate the polynomial approximant of degree 8 in three parts, probably // to improve instruction-level parallelism. Packet16f y, y1, y2; y = pmadd(p16f_cephes_log_p0, x, p16f_cephes_log_p1); y1 = pmadd(p16f_cephes_log_p3, x, p16f_cephes_log_p4); y2 = pmadd(p16f_cephes_log_p6, x, p16f_cephes_log_p7); y = pmadd(y, x, p16f_cephes_log_p2); y1 = pmadd(y1, x, p16f_cephes_log_p5); y2 = pmadd(y2, x, p16f_cephes_log_p8); y = pmadd(y, x3, y1); y = pmadd(y, x3, y2); y = pmul(y, x3); // Add the logarithm of the exponent back to the result of the interpolation. y1 = pmul(e, p16f_cephes_log_q1); tmp = pmul(x2, p16f_half); y = padd(y, y1); x = psub(x, tmp); y2 = pmul(e, p16f_cephes_log_q2); x = padd(x, y); x = padd(x, y2); // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF. return _mm512_mask_blend_ps(iszero_mask, p16f_minus_inf, _mm512_mask_blend_ps(invalid_mask, p16f_nan, x)); } #endif // Exponential function. Works by writing "x = m*log(2) + r" where // "m = floor(x/log(2)+1/2)" and "r" is the remainder. The result is then // "exp(x) = 2^m*exp(r)" where exp(r) is in the range [-1,1). template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f pexp<Packet16f>(const Packet16f& _x) { _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f); _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f); _EIGEN_DECLARE_CONST_Packet16f(127, 127.0f); _EIGEN_DECLARE_CONST_Packet16f(exp_hi, 88.3762626647950f); _EIGEN_DECLARE_CONST_Packet16f(exp_lo, -88.3762626647949f); _EIGEN_DECLARE_CONST_Packet16f(cephes_LOG2EF, 1.44269504088896341f); _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p0, 1.9875691500E-4f); _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p1, 1.3981999507E-3f); _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p2, 8.3334519073E-3f); _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p3, 4.1665795894E-2f); _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p4, 1.6666665459E-1f); _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p5, 5.0000001201E-1f); // Clamp x. Packet16f x = pmax(pmin(_x, p16f_exp_hi), p16f_exp_lo); // Express exp(x) as exp(m*ln(2) + r), start by extracting // m = floor(x/ln(2) + 0.5). Packet16f m = _mm512_floor_ps(pmadd(x, p16f_cephes_LOG2EF, p16f_half)); // Get r = x - m*ln(2). Note that we can do this without losing more than one // ulp precision due to the FMA instruction. _EIGEN_DECLARE_CONST_Packet16f(nln2, -0.6931471805599453f); Packet16f r = _mm512_fmadd_ps(m, p16f_nln2, x); Packet16f r2 = pmul(r, r); // TODO(gonnet): Split into odd/even polynomials and try to exploit // instruction-level parallelism. Packet16f y = p16f_cephes_exp_p0; y = pmadd(y, r, p16f_cephes_exp_p1); y = pmadd(y, r, p16f_cephes_exp_p2); y = pmadd(y, r, p16f_cephes_exp_p3); y = pmadd(y, r, p16f_cephes_exp_p4); y = pmadd(y, r, p16f_cephes_exp_p5); y = pmadd(y, r2, r); y = padd(y, p16f_1); // Build emm0 = 2^m. Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127)); emm0 = _mm512_slli_epi32(emm0, 23); // Return 2^m * exp(r). 
return pmax(pmul(y, _mm512_castsi512_ps(emm0)), _x); } /*template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d pexp<Packet8d>(const Packet8d& _x) { Packet8d x = _x; _EIGEN_DECLARE_CONST_Packet8d(1, 1.0); _EIGEN_DECLARE_CONST_Packet8d(2, 2.0); _EIGEN_DECLARE_CONST_Packet8d(exp_hi, 709.437); _EIGEN_DECLARE_CONST_Packet8d(exp_lo, -709.436139303); _EIGEN_DECLARE_CONST_Packet8d(cephes_LOG2EF, 1.4426950408889634073599); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p0, 1.26177193074810590878e-4); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p1, 3.02994407707441961300e-2); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p2, 9.99999999999999999910e-1); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q0, 3.00198505138664455042e-6); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q1, 2.52448340349684104192e-3); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q2, 2.27265548208155028766e-1); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q3, 2.00000000000000000009e0); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C1, 0.693145751953125); _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C2, 1.42860682030941723212e-6); // clamp x x = pmax(pmin(x, p8d_exp_hi), p8d_exp_lo); // Express exp(x) as exp(g + n*log(2)). const Packet8d n = _mm512_mul_round_pd(p8d_cephes_LOG2EF, x, _MM_FROUND_TO_NEAREST_INT); // Get the remainder modulo log(2), i.e. the "g" described above. Subtract // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last // digits right. const Packet8d nC1 = pmul(n, p8d_cephes_exp_C1); const Packet8d nC2 = pmul(n, p8d_cephes_exp_C2); x = psub(x, nC1); x = psub(x, nC2); const Packet8d x2 = pmul(x, x); // Evaluate the numerator polynomial of the rational interpolant. Packet8d px = p8d_cephes_exp_p0; px = pmadd(px, x2, p8d_cephes_exp_p1); px = pmadd(px, x2, p8d_cephes_exp_p2); px = pmul(px, x); // Evaluate the denominator polynomial of the rational interpolant. Packet8d qx = p8d_cephes_exp_q0; qx = pmadd(qx, x2, p8d_cephes_exp_q1); qx = pmadd(qx, x2, p8d_cephes_exp_q2); qx = pmadd(qx, x2, p8d_cephes_exp_q3); // I don't really get this bit, copied from the SSE2 routines, so... // TODO(gonnet): Figure out what is going on here, perhaps find a better // rational interpolant? x = _mm512_div_pd(px, psub(qx, px)); x = pmadd(p8d_2, x, p8d_1); // Build e=2^n. const Packet8d e = _mm512_castsi512_pd(_mm512_slli_epi64( _mm512_add_epi64(_mm512_cvtpd_epi64(n), _mm512_set1_epi64(1023)), 52)); // Construct the result 2^n * exp(g) = e * x. The max is used to catch // non-finite values in the input. return pmax(pmul(x, e), _x); }*/ // Functions for sqrt. // The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step // of Newton's method, at a cost of 1-2 bits of precision as opposed to the // exact solution. The main advantage of this approach is not just speed, but // also the fact that it can be inlined and pipelined with other computations, // further reducing its effective latency. #if EIGEN_FAST_MATH template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f psqrt<Packet16f>(const Packet16f& _x) { _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f); _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f); _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000); Packet16f neg_half = pmul(_x, p16f_minus_half); // select only the inverse sqrt of positive normal inputs (denormals are // flushed to zero and cause infs as well). 
__mmask16 non_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_GE_OQ); Packet16f x = _mm512_mask_blend_ps(non_zero_mask, _mm512_rsqrt14_ps(_x), _mm512_setzero_ps()); // Do a single step of Newton's iteration. x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five)); // Multiply the original _x by it's reciprocal square root to extract the // square root. return pmul(_x, x); } template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d psqrt<Packet8d>(const Packet8d& _x) { _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5); _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5); _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL); Packet8d neg_half = pmul(_x, p8d_minus_half); // select only the inverse sqrt of positive normal inputs (denormals are // flushed to zero and cause infs as well). __mmask8 non_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_GE_OQ); Packet8d x = _mm512_mask_blend_pd(non_zero_mask, _mm512_rsqrt14_pd(_x), _mm512_setzero_pd()); // Do a first step of Newton's iteration. x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five)); // Do a second step of Newton's iteration. x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five)); // Multiply the original _x by it's reciprocal square root to extract the // square root. return pmul(_x, x); } #else template <> EIGEN_STRONG_INLINE Packet16f psqrt<Packet16f>(const Packet16f& x) { return _mm512_sqrt_ps(x); } template <> EIGEN_STRONG_INLINE Packet8d psqrt<Packet8d>(const Packet8d& x) { return _mm512_sqrt_pd(x); } #endif // Functions for rsqrt. // Almost identical to the sqrt routine, just leave out the last multiplication // and fill in NaN/Inf where needed. Note that this function only exists as an // iterative version for doubles since there is no instruction for diretly // computing the reciprocal square root in AVX-512. #ifdef EIGEN_FAST_MATH template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f prsqrt<Packet16f>(const Packet16f& _x) { _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inf, 0x7f800000); _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000); _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f); _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f); _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000); Packet16f neg_half = pmul(_x, p16f_minus_half); // select only the inverse sqrt of positive normal inputs (denormals are // flushed to zero and cause infs as well). __mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ); Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(), _mm512_rsqrt14_ps(_x)); // Fill in NaNs and Infs for the negative/zero entries. __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ); Packet16f infs_and_nans = _mm512_mask_blend_ps( neg_mask, p16f_nan, _mm512_mask_blend_ps(le_zero_mask, p16f_inf, _mm512_setzero_ps())); // Do a single step of Newton's iteration. x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five)); // Insert NaNs and Infs in all the right places. 
return _mm512_mask_blend_ps(le_zero_mask, infs_and_nans, x); } template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d prsqrt<Packet8d>(const Packet8d& _x) { _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL); _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(nan, 0x7ff1000000000000LL); _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5); _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5); _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL); Packet8d neg_half = pmul(_x, p8d_minus_half); // select only the inverse sqrt of positive normal inputs (denormals are // flushed to zero and cause infs as well). __mmask8 le_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_LT_OQ); Packet8d x = _mm512_mask_blend_pd(le_zero_mask, _mm512_setzero_pd(), _mm512_rsqrt14_pd(_x)); // Fill in NaNs and Infs for the negative/zero entries. __mmask8 neg_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LT_OQ); Packet8d infs_and_nans = _mm512_mask_blend_pd( neg_mask, p8d_nan, _mm512_mask_blend_pd(le_zero_mask, p8d_inf, _mm512_setzero_pd())); // Do a first step of Newton's iteration. x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five)); // Do a second step of Newton's iteration. x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five)); // Insert NaNs and Infs in all the right places. return _mm512_mask_blend_pd(le_zero_mask, infs_and_nans, x); } #else template <> EIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) { return _mm512_rsqrt28_ps(x); } #endif #endif } // end namespace internal } // end namespace Eigen #endif // THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_
file_length: 15,827 | avg_line_length: 38.869018 | max_line_length: 105 | extension_type: h
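The comments inside plog and pexp above describe a Cephes-style range reduction; restated compactly in LaTeX (e is the extracted binary exponent in the first identity and m the rounded multiple of log 2 in the second, both as named in the code comments; the bound on r follows from m being the nearest integer to x / log 2):

\[
  \log x = e\,\log 2 + \log m, \qquad x = 2^{e} m,\ \ m \in \left[\tfrac{1}{\sqrt{2}}, \sqrt{2}\right),
\]
\[
  \exp x = 2^{m} \exp(r), \qquad m = \left\lfloor \tfrac{x}{\log 2} + \tfrac{1}{2} \right\rfloor,\ \ r = x - m \log 2,\ \ |r| \le \tfrac{\log 2}{2}.
\]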
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/AltiVec/MathFunctions.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007 Julien Pommier // Copyright (C) 2009 Gael Guennebaud <[email protected]> // Copyright (C) 2016 Konstantinos Margaritis <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* The sin, cos, exp, and log functions of this file come from * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ */ #ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H #define EIGEN_MATH_FUNCTIONS_ALTIVEC_H namespace Eigen { namespace internal { static _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); static _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); static _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f); static _EIGEN_DECLARE_CONST_Packet4i(23, 23); static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000); /* the smallest non denormalized float number */ static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000); static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000); // -1.f/0.f static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan, 0xffffffff); /* natural logarithm computed for 4 simultaneous float return NaN for x <= 0 */ static _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f); static _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f); static _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f); static _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f); #ifdef __VSX__ static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0); static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0); static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5); static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437); static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303); static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 
9.99999999999999999910e-1); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6); #ifdef __POWER8_VECTOR__ static Packet2l p2l_1023 = { 1023, 1023 }; static Packet2ul p2ul_52 = { 52, 52 }; #endif #endif template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f plog<Packet4f>(const Packet4f& _x) { Packet4f x = _x; Packet4i emm0; /* isvalid_mask is 0 if x < 0 or x is NaN. */ Packet4ui isvalid_mask = reinterpret_cast<Packet4ui>(vec_cmpge(x, p4f_ZERO)); Packet4ui iszero_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(x, p4f_ZERO)); x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */ emm0 = vec_sr(reinterpret_cast<Packet4i>(x), reinterpret_cast<Packet4ui>(p4i_23)); /* keep only the fractional part */ x = pand(x, p4f_inv_mant_mask); x = por(x, p4f_half); emm0 = psub(emm0, p4i_0x7f); Packet4f e = padd(vec_ctf(emm0, 0), p4f_1); /* part2: if( x < SQRTHF ) { e -= 1; x = x + x - 1.0; } else { x = x - 1.0; } */ Packet4f mask = reinterpret_cast<Packet4f>(vec_cmplt(x, p4f_cephes_SQRTHF)); Packet4f tmp = pand(x, mask); x = psub(x, p4f_1); e = psub(e, pand(p4f_1, mask)); x = padd(x, tmp); Packet4f x2 = pmul(x,x); Packet4f x3 = pmul(x2,x); Packet4f y, y1, y2; y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1); y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4); y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7); y = pmadd(y , x, p4f_cephes_log_p2); y1 = pmadd(y1, x, p4f_cephes_log_p5); y2 = pmadd(y2, x, p4f_cephes_log_p8); y = pmadd(y, x3, y1); y = pmadd(y, x3, y2); y = pmul(y, x3); y1 = pmul(e, p4f_cephes_log_q1); tmp = pmul(x2, p4f_half); y = padd(y, y1); x = psub(x, tmp); y2 = pmul(e, p4f_cephes_log_q2); x = padd(x, y); x = padd(x, y2); // negative arg will be NAN, 0 will be -INF x = vec_sel(x, p4f_minus_inf, iszero_mask); x = vec_sel(p4f_minus_nan, x, isvalid_mask); return x; } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp<Packet4f>(const Packet4f& _x) { Packet4f x = _x; Packet4f tmp, fx; Packet4i emm0; // clamp x x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo); // express exp(x) as exp(g + n*log(2)) fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half); fx = pfloor(fx); tmp = pmul(fx, p4f_cephes_exp_C1); Packet4f z = pmul(fx, p4f_cephes_exp_C2); x = psub(x, tmp); x = psub(x, z); z = pmul(x,x); Packet4f y = p4f_cephes_exp_p0; y = pmadd(y, x, p4f_cephes_exp_p1); y = pmadd(y, x, p4f_cephes_exp_p2); y = pmadd(y, x, p4f_cephes_exp_p3); y = pmadd(y, x, p4f_cephes_exp_p4); y = pmadd(y, x, p4f_cephes_exp_p5); y = pmadd(y, z, x); y = padd(y, p4f_1); // build 2^n emm0 = vec_cts(fx, 0); emm0 = vec_add(emm0, p4i_0x7f); emm0 = vec_sl(emm0, reinterpret_cast<Packet4ui>(p4i_23)); // Altivec's max & min operators just drop silent NaNs. Check NaNs in // inputs and return them unmodified. 
Packet4ui isnumber_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(_x, _x)); return vec_sel(_x, pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x), isnumber_mask); } #ifndef EIGEN_COMP_CLANG template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f prsqrt<Packet4f>(const Packet4f& x) { return vec_rsqrt(x); } #endif #ifdef __VSX__ #ifndef EIGEN_COMP_CLANG template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d prsqrt<Packet2d>(const Packet2d& x) { return vec_rsqrt(x); } #endif template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt<Packet4f>(const Packet4f& x) { return vec_sqrt(x); } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d psqrt<Packet2d>(const Packet2d& x) { return vec_sqrt(x); } // VSX support varies between different compilers and even different // versions of the same compiler. For gcc version >= 4.9.3, we can use // vec_cts to efficiently convert Packet2d to Packet2l. Otherwise, use // a slow version that works with older compilers. // Update: apparently vec_cts/vec_ctf intrinsics for 64-bit doubles // are buggy, https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70963 static inline Packet2l ConvertToPacket2l(const Packet2d& x) { #if EIGEN_GNUC_AT_LEAST(5, 4) || \ (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1) return vec_cts(x, 0); // TODO: check clang version. #else double tmp[2]; memcpy(tmp, &x, sizeof(tmp)); Packet2l l = { static_cast<long long>(tmp[0]), static_cast<long long>(tmp[1]) }; return l; #endif } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp<Packet2d>(const Packet2d& _x) { Packet2d x = _x; Packet2d tmp, fx; Packet2l emm0; // clamp x x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo); /* express exp(x) as exp(g + n*log(2)) */ fx = pmadd(x, p2d_cephes_LOG2EF, p2d_half); fx = pfloor(fx); tmp = pmul(fx, p2d_cephes_exp_C1); Packet2d z = pmul(fx, p2d_cephes_exp_C2); x = psub(x, tmp); x = psub(x, z); Packet2d x2 = pmul(x,x); Packet2d px = p2d_cephes_exp_p0; px = pmadd(px, x2, p2d_cephes_exp_p1); px = pmadd(px, x2, p2d_cephes_exp_p2); px = pmul (px, x); Packet2d qx = p2d_cephes_exp_q0; qx = pmadd(qx, x2, p2d_cephes_exp_q1); qx = pmadd(qx, x2, p2d_cephes_exp_q2); qx = pmadd(qx, x2, p2d_cephes_exp_q3); x = pdiv(px,psub(qx,px)); x = pmadd(p2d_2,x,p2d_1); // build 2^n emm0 = ConvertToPacket2l(fx); #ifdef __POWER8_VECTOR__ emm0 = vec_add(emm0, p2l_1023); emm0 = vec_sl(emm0, p2ul_52); #else // Code is a bit complex for POWER7. There is actually a // vec_xxsldi intrinsic but it is not supported by some gcc versions. // So we shift (52-32) bits and do a word swap with zeros. _EIGEN_DECLARE_CONST_Packet4i(1023, 1023); _EIGEN_DECLARE_CONST_Packet4i(20, 20); // 52 - 32 Packet4i emm04i = reinterpret_cast<Packet4i>(emm0); emm04i = vec_add(emm04i, p4i_1023); emm04i = vec_sl(emm04i, reinterpret_cast<Packet4ui>(p4i_20)); static const Packet16uc perm = { 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03, 0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b }; #ifdef _BIG_ENDIAN emm0 = reinterpret_cast<Packet2l>(vec_perm(p4i_ZERO, emm04i, perm)); #else emm0 = reinterpret_cast<Packet2l>(vec_perm(emm04i, p4i_ZERO, perm)); #endif #endif // Altivec's max & min operators just drop silent NaNs. Check NaNs in // inputs and return them unmodified. 
Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x)); return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x), isnumber_mask); } #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATH_FUNCTIONS_ALTIVEC_H
file_length: 10,797 | avg_line_length: 32.430341 | max_line_length: 85 | extension_type: h
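The "build 2^n" step in pexp<Packet2d> above (add 1023, shift left by 52) relies on the IEEE-754 double layout; a scalar sketch of the same trick, offered as an illustration rather than Eigen code:

// For a normal IEEE-754 double, 2^n has biased exponent n + 1023 and a zero
// mantissa, so it can be assembled by shifting the biased exponent into
// bits 62..52 and bit-casting (valid for roughly -1022 <= n <= 1023).
#include <cstdint>
#include <cstring>

inline double pow2_int(std::int64_t n) {
  const std::uint64_t bits = static_cast<std::uint64_t>(n + 1023) << 52;
  double result;
  std::memcpy(&result, &bits, sizeof result);  // bit-cast without violating aliasing rules
  return result;
}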
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/CUDA/MathFunctions.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATH_FUNCTIONS_CUDA_H #define EIGEN_MATH_FUNCTIONS_CUDA_H namespace Eigen { namespace internal { // Make sure this is only available when targeting a GPU: we don't want to // introduce conflicts between these packet_traits definitions and the ones // we'll use on the host side (SSE, AVX, ...) #if defined(__CUDACC__) && defined(EIGEN_USE_GPU) template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plog<float4>(const float4& a) { return make_float4(logf(a.x), logf(a.y), logf(a.z), logf(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plog<double2>(const double2& a) { using ::log; return make_double2(log(a.x), log(a.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plog1p<float4>(const float4& a) { return make_float4(log1pf(a.x), log1pf(a.y), log1pf(a.z), log1pf(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plog1p<double2>(const double2& a) { return make_double2(log1p(a.x), log1p(a.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pexp<float4>(const float4& a) { return make_float4(expf(a.x), expf(a.y), expf(a.z), expf(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pexp<double2>(const double2& a) { using ::exp; return make_double2(exp(a.x), exp(a.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 psqrt<float4>(const float4& a) { return make_float4(sqrtf(a.x), sqrtf(a.y), sqrtf(a.z), sqrtf(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 psqrt<double2>(const double2& a) { using ::sqrt; return make_double2(sqrt(a.x), sqrt(a.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 prsqrt<float4>(const float4& a) { return make_float4(rsqrtf(a.x), rsqrtf(a.y), rsqrtf(a.z), rsqrtf(a.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 prsqrt<double2>(const double2& a) { return make_double2(rsqrt(a.x), rsqrt(a.y)); } #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATH_FUNCTIONS_CUDA_H
file_length: 2,387 | avg_line_length: 24.956522 | max_line_length: 75 | extension_type: h
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/CUDA/PacketMath.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PACKET_MATH_CUDA_H #define EIGEN_PACKET_MATH_CUDA_H namespace Eigen { namespace internal { // Make sure this is only available when targeting a GPU: we don't want to // introduce conflicts between these packet_traits definitions and the ones // we'll use on the host side (SSE, AVX, ...) #if defined(__CUDACC__) && defined(EIGEN_USE_GPU) template<> struct is_arithmetic<float4> { enum { value = true }; }; template<> struct is_arithmetic<double2> { enum { value = true }; }; template<> struct packet_traits<float> : default_packet_traits { typedef float4 type; typedef float4 half; enum { Vectorizable = 1, AlignedOnScalar = 1, size=4, HasHalfPacket = 0, HasDiv = 1, HasSin = 0, HasCos = 0, HasLog = 1, HasExp = 1, HasSqrt = 1, HasRsqrt = 1, HasLGamma = 1, HasDiGamma = 1, HasZeta = 1, HasPolygamma = 1, HasErf = 1, HasErfc = 1, HasIGamma = 1, HasIGammac = 1, HasBetaInc = 1, HasBlend = 0, }; }; template<> struct packet_traits<double> : default_packet_traits { typedef double2 type; typedef double2 half; enum { Vectorizable = 1, AlignedOnScalar = 1, size=2, HasHalfPacket = 0, HasDiv = 1, HasLog = 1, HasExp = 1, HasSqrt = 1, HasRsqrt = 1, HasLGamma = 1, HasDiGamma = 1, HasZeta = 1, HasPolygamma = 1, HasErf = 1, HasErfc = 1, HasIGamma = 1, HasIGammac = 1, HasBetaInc = 1, HasBlend = 0, }; }; template<> struct unpacket_traits<float4> { typedef float type; enum {size=4, alignment=Aligned16}; typedef float4 half; }; template<> struct unpacket_traits<double2> { typedef double type; enum {size=2, alignment=Aligned16}; typedef double2 half; }; template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pset1<float4>(const float& from) { return make_float4(from, from, from, from); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pset1<double2>(const double& from) { return make_double2(from, from); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plset<float4>(const float& a) { return make_float4(a, a+1, a+2, a+3); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plset<double2>(const double& a) { return make_double2(a, a+1); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 padd<float4>(const float4& a, const float4& b) { return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 padd<double2>(const double2& a, const double2& b) { return make_double2(a.x+b.x, a.y+b.y); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 psub<float4>(const float4& a, const float4& b) { return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 psub<double2>(const double2& a, const double2& b) { return make_double2(a.x-b.x, a.y-b.y); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pnegate(const float4& a) { return make_float4(-a.x, -a.y, -a.z, -a.w); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pnegate(const double2& a) { return make_double2(-a.x, -a.y); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pconj(const float4& a) { return a; } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pconj(const double2& a) { return a; } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 
pmul<float4>(const float4& a, const float4& b) { return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmul<double2>(const double2& a, const double2& b) { return make_double2(a.x*b.x, a.y*b.y); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pdiv<float4>(const float4& a, const float4& b) { return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pdiv<double2>(const double2& a, const double2& b) { return make_double2(a.x/b.x, a.y/b.y); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmin<float4>(const float4& a, const float4& b) { return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z), fminf(a.w, b.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmin<double2>(const double2& a, const double2& b) { return make_double2(fmin(a.x, b.x), fmin(a.y, b.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmax<float4>(const float4& a, const float4& b) { return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z), fmaxf(a.w, b.w)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmax<double2>(const double2& a, const double2& b) { return make_double2(fmax(a.x, b.x), fmax(a.y, b.y)); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pload<float4>(const float* from) { return *reinterpret_cast<const float4*>(from); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pload<double2>(const double* from) { return *reinterpret_cast<const double2*>(from); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploadu<float4>(const float* from) { return make_float4(from[0], from[1], from[2], from[3]); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu<double2>(const double* from) { return make_double2(from[0], from[1]); } template<> EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float* from) { return make_float4(from[0], from[0], from[1], from[1]); } template<> EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double* from) { return make_double2(from[0], from[0]); } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<float>(float* to, const float4& from) { *reinterpret_cast<float4*>(to) = from; } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<double>(double* to, const double2& from) { *reinterpret_cast<double2*>(to) = from; } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const float4& from) { to[0] = from.x; to[1] = from.y; to[2] = from.z; to[3] = from.w; } template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const double2& from) { to[0] = from.x; to[1] = from.y; } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Aligned>(const float* from) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 return __ldg((const float4*)from); #else return make_float4(from[0], from[1], from[2], from[3]); #endif } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Aligned>(const double* from) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 return __ldg((const double2*)from); #else return make_double2(from[0], from[1]); #endif } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Unaligned>(const float* from) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 return make_float4(__ldg(from+0), __ldg(from+1), __ldg(from+2), __ldg(from+3)); #else return make_float4(from[0], from[1], from[2], from[3]); #endif } template<> EIGEN_DEVICE_FUNC 
EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Unaligned>(const double* from) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350 return make_double2(__ldg(from+0), __ldg(from+1)); #else return make_double2(from[0], from[1]); #endif } template<> EIGEN_DEVICE_FUNC inline float4 pgather<float, float4>(const float* from, Index stride) { return make_float4(from[0*stride], from[1*stride], from[2*stride], from[3*stride]); } template<> EIGEN_DEVICE_FUNC inline double2 pgather<double, double2>(const double* from, Index stride) { return make_double2(from[0*stride], from[1*stride]); } template<> EIGEN_DEVICE_FUNC inline void pscatter<float, float4>(float* to, const float4& from, Index stride) { to[stride*0] = from.x; to[stride*1] = from.y; to[stride*2] = from.z; to[stride*3] = from.w; } template<> EIGEN_DEVICE_FUNC inline void pscatter<double, double2>(double* to, const double2& from, Index stride) { to[stride*0] = from.x; to[stride*1] = from.y; } template<> EIGEN_DEVICE_FUNC inline float pfirst<float4>(const float4& a) { return a.x; } template<> EIGEN_DEVICE_FUNC inline double pfirst<double2>(const double2& a) { return a.x; } template<> EIGEN_DEVICE_FUNC inline float predux<float4>(const float4& a) { return a.x + a.y + a.z + a.w; } template<> EIGEN_DEVICE_FUNC inline double predux<double2>(const double2& a) { return a.x + a.y; } template<> EIGEN_DEVICE_FUNC inline float predux_max<float4>(const float4& a) { return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w)); } template<> EIGEN_DEVICE_FUNC inline double predux_max<double2>(const double2& a) { return fmax(a.x, a.y); } template<> EIGEN_DEVICE_FUNC inline float predux_min<float4>(const float4& a) { return fminf(fminf(a.x, a.y), fminf(a.z, a.w)); } template<> EIGEN_DEVICE_FUNC inline double predux_min<double2>(const double2& a) { return fmin(a.x, a.y); } template<> EIGEN_DEVICE_FUNC inline float predux_mul<float4>(const float4& a) { return a.x * a.y * a.z * a.w; } template<> EIGEN_DEVICE_FUNC inline double predux_mul<double2>(const double2& a) { return a.x * a.y; } template<> EIGEN_DEVICE_FUNC inline float4 pabs<float4>(const float4& a) { return make_float4(fabsf(a.x), fabsf(a.y), fabsf(a.z), fabsf(a.w)); } template<> EIGEN_DEVICE_FUNC inline double2 pabs<double2>(const double2& a) { return make_double2(fabs(a.x), fabs(a.y)); } EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<float4,4>& kernel) { float tmp = kernel.packet[0].y; kernel.packet[0].y = kernel.packet[1].x; kernel.packet[1].x = tmp; tmp = kernel.packet[0].z; kernel.packet[0].z = kernel.packet[2].x; kernel.packet[2].x = tmp; tmp = kernel.packet[0].w; kernel.packet[0].w = kernel.packet[3].x; kernel.packet[3].x = tmp; tmp = kernel.packet[1].z; kernel.packet[1].z = kernel.packet[2].y; kernel.packet[2].y = tmp; tmp = kernel.packet[1].w; kernel.packet[1].w = kernel.packet[3].y; kernel.packet[3].y = tmp; tmp = kernel.packet[2].w; kernel.packet[2].w = kernel.packet[3].z; kernel.packet[3].z = tmp; } EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<double2,2>& kernel) { double tmp = kernel.packet[0].y; kernel.packet[0].y = kernel.packet[1].x; kernel.packet[1].x = tmp; } #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_PACKET_MATH_CUDA_H
file_length: 10,744 | avg_line_length: 31.170659 | max_line_length: 126 | extension_type: h
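The float4 ptranspose above performs an in-place 4x4 transpose as six pairwise swaps of the off-diagonal entries; the same access pattern on a plain row-major array, shown as an illustrative sketch (not Eigen code):

// Swap element (r, c) with (c, r) for every pair above the diagonal:
// (0,1) (0,2) (0,3) (1,2) (1,3) (2,3) -- the same six exchanges the
// PacketBlock<float4,4> version spells out field by field.
#include <utility>

inline void transpose4x4(float m[4][4]) {
  for (int r = 0; r < 4; ++r)
    for (int c = r + 1; c < 4; ++c)
      std::swap(m[r][c], m[c][r]);
}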
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/CUDA/TypeCasting.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TYPE_CASTING_CUDA_H #define EIGEN_TYPE_CASTING_CUDA_H namespace Eigen { namespace internal { template<> struct scalar_cast_op<float, Eigen::half> { EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) typedef Eigen::half result_type; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const { #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 return __float2half(a); #else return Eigen::half(a); #endif } }; template<> struct functor_traits<scalar_cast_op<float, Eigen::half> > { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; }; template<> struct scalar_cast_op<int, Eigen::half> { EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) typedef Eigen::half result_type; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const { #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 return __float2half(static_cast<float>(a)); #else return Eigen::half(static_cast<float>(a)); #endif } }; template<> struct functor_traits<scalar_cast_op<int, Eigen::half> > { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; }; template<> struct scalar_cast_op<Eigen::half, float> { EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) typedef float result_type; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const { #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 return __half2float(a); #else return static_cast<float>(a); #endif } }; template<> struct functor_traits<scalar_cast_op<Eigen::half, float> > { enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; }; #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 template <> struct type_casting_traits<Eigen::half, float> { enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; }; template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast<half2, float4>(const half2& a, const half2& b) { float2 r1 = __half22float2(a); float2 r2 = __half22float2(b); return make_float4(r1.x, r1.y, r2.x, r2.y); } template <> struct type_casting_traits<float, Eigen::half> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; }; template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcast<float4, half2>(const float4& a) { // Simply discard the second half of the input return __floats2half2_rn(a.x, a.y); } #elif defined EIGEN_VECTORIZE_AVX512 template <> struct type_casting_traits<half, float> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16h, Packet16f>(const Packet16h& a) { return half2float(a); } template <> struct type_casting_traits<float, half> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet16h pcast<Packet16f, Packet16h>(const Packet16f& a) { return float2half(a); } #elif defined EIGEN_VECTORIZE_AVX template <> struct type_casting_traits<Eigen::half, float> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8h, Packet8f>(const Packet8h& a) { return half2float(a); } template <> struct 
type_casting_traits<float, Eigen::half> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet8h pcast<Packet8f, Packet8h>(const Packet8f& a) { return float2half(a); } // Disable the following code since it's broken on too many platforms / compilers. //#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC) #elif 0 template <> struct type_casting_traits<Eigen::half, float> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4h, Packet4f>(const Packet4h& a) { __int64_t a64 = _mm_cvtm64_si64(a.x); Eigen::half h = raw_uint16_to_half(static_cast<unsigned short>(a64)); float f1 = static_cast<float>(h); h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16)); float f2 = static_cast<float>(h); h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32)); float f3 = static_cast<float>(h); h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48)); float f4 = static_cast<float>(h); return _mm_set_ps(f4, f3, f2, f1); } template <> struct type_casting_traits<float, Eigen::half> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f& a) { EIGEN_ALIGN16 float aux[4]; pstore(aux, a); Eigen::half h0(aux[0]); Eigen::half h1(aux[1]); Eigen::half h2(aux[2]); Eigen::half h3(aux[3]); Packet4h result; result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x); return result; } #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_TYPE_CASTING_CUDA_H
file_length: 5,509 | avg_line_length: 24.868545 | max_line_length: 110 | extension_type: h
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/Default/Settings.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <[email protected]> // Copyright (C) 2006-2008 Benoit Jacob <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* All the parameters defined in this file can be specialized in the * architecture specific files, and/or by the user. * More to come... */ #ifndef EIGEN_DEFAULT_SETTINGS_H #define EIGEN_DEFAULT_SETTINGS_H /** Defines the maximal loop size to enable meta unrolling of loops. * Note that the value here is expressed in Eigen's own notion of "number of FLOPS", * it does not correspond to the number of iterations or the number of instructions */ #ifndef EIGEN_UNROLLING_LIMIT #define EIGEN_UNROLLING_LIMIT 100 #endif /** Defines the threshold between a "small" and a "large" matrix. * This threshold is mainly used to select the proper product implementation. */ #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8 #endif /** Defines the maximal width of the blocks used in the triangular product and solver * for vectors (level 2 blas xTRMV and xTRSV). The default is 8. */ #ifndef EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH #define EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH 8 #endif /** Defines the default number of registers available for that architecture. * Currently it must be 8 or 16. Other values will fail. */ #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8 #endif #endif // EIGEN_DEFAULT_SETTINGS_H
file_length: 1,746 | avg_line_length: 33.94 | max_line_length: 85 | extension_type: h
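Since every setting in the file above is wrapped in #ifndef, a project can override a value by defining the macro before including any Eigen header. A usage sketch (the chosen values are hypothetical, not recommendations):

// Hypothetical project-level overrides; they must appear before the first
// Eigen include so the #ifndef guards in Settings.h see them already defined.
#define EIGEN_UNROLLING_LIMIT 50
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
#include <Eigen/Dense>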
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/NEON/MathFunctions.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* The sin, cos, exp, and log functions of this file come from * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ */ #ifndef EIGEN_MATH_FUNCTIONS_NEON_H #define EIGEN_MATH_FUNCTIONS_NEON_H namespace Eigen { namespace internal { template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp<Packet4f>(const Packet4f& _x) { Packet4f x = _x; Packet4f tmp, fx; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f); _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f); _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f); _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f); x = vminq_f32(x, p4f_exp_hi); x = vmaxq_f32(x, p4f_exp_lo); /* express exp(x) as exp(g + n*log(2)) */ fx = vmlaq_f32(p4f_half, x, p4f_cephes_LOG2EF); /* perform a floorf */ tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx)); /* if greater, substract 1 */ Packet4ui mask = vcgtq_f32(tmp, fx); mask = vandq_u32(mask, vreinterpretq_u32_f32(p4f_1)); fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask)); tmp = vmulq_f32(fx, p4f_cephes_exp_C1); Packet4f z = vmulq_f32(fx, p4f_cephes_exp_C2); x = vsubq_f32(x, tmp); x = vsubq_f32(x, z); Packet4f y = vmulq_f32(p4f_cephes_exp_p0, x); z = vmulq_f32(x, x); y = vaddq_f32(y, p4f_cephes_exp_p1); y = vmulq_f32(y, x); y = vaddq_f32(y, p4f_cephes_exp_p2); y = vmulq_f32(y, x); y = vaddq_f32(y, p4f_cephes_exp_p3); y = vmulq_f32(y, x); y = vaddq_f32(y, p4f_cephes_exp_p4); y = vmulq_f32(y, x); y = vaddq_f32(y, p4f_cephes_exp_p5); y = vmulq_f32(y, z); y = vaddq_f32(y, x); y = vaddq_f32(y, p4f_1); /* build 2^n */ int32x4_t mm; mm = vcvtq_s32_f32(fx); mm = vaddq_s32(mm, p4i_0x7f); mm = vshlq_n_s32(mm, 23); Packet4f pow2n = vreinterpretq_f32_s32(mm); y = vmulq_f32(y, pow2n); return y; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATH_FUNCTIONS_NEON_H
file_length: 2,846 | avg_line_length: 29.945652 | max_line_length: 75 | extension_type: h
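pexp<Packet4f> above implements its "perform a floorf" step by an integer round-trip followed by a conditional subtract; the scalar equivalent, shown only as an illustration (valid while |x| fits in an int):

// Truncate toward zero via an int conversion, then step down by one whenever
// the truncation landed above x (i.e. for negative non-integers) -- exactly
// what the vcgtq_f32 mask-and-subtract sequence does lane by lane.
inline float floor_via_truncation(float x) {
  const float t = static_cast<float>(static_cast<int>(x));
  return (t > x) ? t - 1.0f : t;
}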
repo: abess
file: abess-master/python/include/Eigen/src/Core/arch/SSE/MathFunctions.h
code:
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007 Julien Pommier // Copyright (C) 2009 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* The sin, cos, exp, and log functions of this file come from * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ */ #ifndef EIGEN_MATH_FUNCTIONS_SSE_H #define EIGEN_MATH_FUNCTIONS_SSE_H namespace Eigen { namespace internal { template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f plog<Packet4f>(const Packet4f& _x) { Packet4f x = _x; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f); _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000); /* the smallest non denormalized float number */ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000); _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000);//-1.f/0.f); /* natural logarithm computed for 4 simultaneous float return NaN for x <= 0 */ _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f); _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f); Packet4i emm0; Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps()); x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */ emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23); /* keep only the fractional part */ x = _mm_and_ps(x, p4f_inv_mant_mask); x = _mm_or_ps(x, p4f_half); emm0 = _mm_sub_epi32(emm0, p4i_0x7f); Packet4f e = padd(Packet4f(_mm_cvtepi32_ps(emm0)), p4f_1); /* part2: if( x < SQRTHF ) { e -= 1; x = x + x - 1.0; } else { x = x - 1.0; } */ Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF); Packet4f tmp = pand(x, mask); x = psub(x, p4f_1); e = psub(e, pand(p4f_1, mask)); x = padd(x, tmp); Packet4f x2 = pmul(x,x); Packet4f x3 = pmul(x2,x); Packet4f y, y1, y2; y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1); y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4); y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7); y = pmadd(y , x, p4f_cephes_log_p2); y1 = pmadd(y1, x, p4f_cephes_log_p5); y2 = pmadd(y2, x, p4f_cephes_log_p8); y = pmadd(y, x3, y1); y = pmadd(y, x3, y2); y = pmul(y, x3); y1 = pmul(e, p4f_cephes_log_q1); tmp = pmul(x2, p4f_half); y = padd(y, y1); x = psub(x, tmp); y2 = pmul(e, p4f_cephes_log_q2); x = padd(x, y); x = padd(x, y2); // negative arg will be NAN, 0 will be -INF return _mm_or_ps(_mm_andnot_ps(iszero_mask, _mm_or_ps(x, invalid_mask)), _mm_and_ps(iszero_mask, p4f_minus_inf)); } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f 
pexp<Packet4f>(const Packet4f& _x) { Packet4f x = _x; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f); _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f); _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f); _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f); _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f); Packet4f tmp, fx; Packet4i emm0; // clamp x x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo); /* express exp(x) as exp(g + n*log(2)) */ fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half); #ifdef EIGEN_VECTORIZE_SSE4_1 fx = _mm_floor_ps(fx); #else emm0 = _mm_cvttps_epi32(fx); tmp = _mm_cvtepi32_ps(emm0); /* if greater, substract 1 */ Packet4f mask = _mm_cmpgt_ps(tmp, fx); mask = _mm_and_ps(mask, p4f_1); fx = psub(tmp, mask); #endif tmp = pmul(fx, p4f_cephes_exp_C1); Packet4f z = pmul(fx, p4f_cephes_exp_C2); x = psub(x, tmp); x = psub(x, z); z = pmul(x,x); Packet4f y = p4f_cephes_exp_p0; y = pmadd(y, x, p4f_cephes_exp_p1); y = pmadd(y, x, p4f_cephes_exp_p2); y = pmadd(y, x, p4f_cephes_exp_p3); y = pmadd(y, x, p4f_cephes_exp_p4); y = pmadd(y, x, p4f_cephes_exp_p5); y = pmadd(y, z, x); y = padd(y, p4f_1); // build 2^n emm0 = _mm_cvttps_epi32(fx); emm0 = _mm_add_epi32(emm0, p4i_0x7f); emm0 = _mm_slli_epi32(emm0, 23); return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x); } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp<Packet2d>(const Packet2d& _x) { Packet2d x = _x; _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0); _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0); _EIGEN_DECLARE_CONST_Packet2d(half, 0.5); _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437); _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303); _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125); _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6); static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0); Packet2d tmp, fx; Packet4i emm0; // clamp x x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo); /* express exp(x) as exp(g + n*log(2)) */ fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half); #ifdef EIGEN_VECTORIZE_SSE4_1 fx = _mm_floor_pd(fx); #else emm0 = _mm_cvttpd_epi32(fx); tmp = _mm_cvtepi32_pd(emm0); /* if greater, substract 1 */ Packet2d mask = _mm_cmpgt_pd(tmp, fx); mask = _mm_and_pd(mask, p2d_1); fx = psub(tmp, mask); #endif tmp = pmul(fx, p2d_cephes_exp_C1); Packet2d z = pmul(fx, p2d_cephes_exp_C2); x = psub(x, tmp); x = psub(x, z); Packet2d x2 = pmul(x,x); 
Packet2d px = p2d_cephes_exp_p0; px = pmadd(px, x2, p2d_cephes_exp_p1); px = pmadd(px, x2, p2d_cephes_exp_p2); px = pmul (px, x); Packet2d qx = p2d_cephes_exp_q0; qx = pmadd(qx, x2, p2d_cephes_exp_q1); qx = pmadd(qx, x2, p2d_cephes_exp_q2); qx = pmadd(qx, x2, p2d_cephes_exp_q3); x = pdiv(px,psub(qx,px)); x = pmadd(p2d_2,x,p2d_1); // build 2^n emm0 = _mm_cvttpd_epi32(fx); emm0 = _mm_add_epi32(emm0, p4i_1023_0); emm0 = _mm_slli_epi32(emm0, 20); emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3)); return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x); } /* evaluation of 4 sines at onces, using SSE2 intrinsics. The code is the exact rewriting of the cephes sinf function. Precision is excellent as long as x < 8192 (I did not bother to take into account the special handling they have for greater values -- it does not return garbage for arguments over 8192, though, but the extra precision is missing). Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the surprising but correct result. */ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psin<Packet4f>(const Packet4f& _x) { Packet4f x = _x; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); _EIGEN_DECLARE_CONST_Packet4i(1, 1); _EIGEN_DECLARE_CONST_Packet4i(not1, ~1); _EIGEN_DECLARE_CONST_Packet4i(2, 2); _EIGEN_DECLARE_CONST_Packet4i(4, 4); _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000); _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f); _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f); _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f); _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f); _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f); _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f); _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f); _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f); _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f); _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI Packet4f xmm1, xmm2, xmm3, sign_bit, y; Packet4i emm0, emm2; sign_bit = x; /* take the absolute value */ x = pabs(x); /* take the modulo */ /* extract the sign bit (upper one) */ sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask); /* scale by 4/Pi */ y = pmul(x, p4f_cephes_FOPI); /* store the integer part of y in mm0 */ emm2 = _mm_cvttps_epi32(y); /* j=(j+1) & (~1) (see the cephes sources) */ emm2 = _mm_add_epi32(emm2, p4i_1); emm2 = _mm_and_si128(emm2, p4i_not1); y = _mm_cvtepi32_ps(emm2); /* get the swap sign flag */ emm0 = _mm_and_si128(emm2, p4i_4); emm0 = _mm_slli_epi32(emm0, 29); /* get the polynom selection mask there is one polynom for 0 <= x <= Pi/4 and another one for Pi/4<x<=Pi/2 Both branches will be computed. 
*/ emm2 = _mm_and_si128(emm2, p4i_2); emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128()); Packet4f swap_sign_bit = _mm_castsi128_ps(emm0); Packet4f poly_mask = _mm_castsi128_ps(emm2); sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit); /* The magic pass: "Extended precision modular arithmetic" x = ((x - y * DP1) - y * DP2) - y * DP3; */ xmm1 = pmul(y, p4f_minus_cephes_DP1); xmm2 = pmul(y, p4f_minus_cephes_DP2); xmm3 = pmul(y, p4f_minus_cephes_DP3); x = padd(x, xmm1); x = padd(x, xmm2); x = padd(x, xmm3); /* Evaluate the first polynom (0 <= x <= Pi/4) */ y = p4f_coscof_p0; Packet4f z = _mm_mul_ps(x,x); y = pmadd(y, z, p4f_coscof_p1); y = pmadd(y, z, p4f_coscof_p2); y = pmul(y, z); y = pmul(y, z); Packet4f tmp = pmul(z, p4f_half); y = psub(y, tmp); y = padd(y, p4f_1); /* Evaluate the second polynom (Pi/4 <= x <= 0) */ Packet4f y2 = p4f_sincof_p0; y2 = pmadd(y2, z, p4f_sincof_p1); y2 = pmadd(y2, z, p4f_sincof_p2); y2 = pmul(y2, z); y2 = pmul(y2, x); y2 = padd(y2, x); /* select the correct result from the two polynoms */ y2 = _mm_and_ps(poly_mask, y2); y = _mm_andnot_ps(poly_mask, y); y = _mm_or_ps(y,y2); /* update the sign */ return _mm_xor_ps(y, sign_bit); } /* almost the same as psin */ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pcos<Packet4f>(const Packet4f& _x) { Packet4f x = _x; _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f); _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f); _EIGEN_DECLARE_CONST_Packet4i(1, 1); _EIGEN_DECLARE_CONST_Packet4i(not1, ~1); _EIGEN_DECLARE_CONST_Packet4i(2, 2); _EIGEN_DECLARE_CONST_Packet4i(4, 4); _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f); _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f); _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f); _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f); _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f); _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f); _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f); _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f); _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f); _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI Packet4f xmm1, xmm2, xmm3, y; Packet4i emm0, emm2; x = pabs(x); /* scale by 4/Pi */ y = pmul(x, p4f_cephes_FOPI); /* get the integer part of y */ emm2 = _mm_cvttps_epi32(y); /* j=(j+1) & (~1) (see the cephes sources) */ emm2 = _mm_add_epi32(emm2, p4i_1); emm2 = _mm_and_si128(emm2, p4i_not1); y = _mm_cvtepi32_ps(emm2); emm2 = _mm_sub_epi32(emm2, p4i_2); /* get the swap sign flag */ emm0 = _mm_andnot_si128(emm2, p4i_4); emm0 = _mm_slli_epi32(emm0, 29); /* get the polynom selection mask */ emm2 = _mm_and_si128(emm2, p4i_2); emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128()); Packet4f sign_bit = _mm_castsi128_ps(emm0); Packet4f poly_mask = _mm_castsi128_ps(emm2); /* The magic pass: "Extended precision modular arithmetic" x = ((x - y * DP1) - y * DP2) - y * DP3; */ xmm1 = pmul(y, p4f_minus_cephes_DP1); xmm2 = pmul(y, p4f_minus_cephes_DP2); xmm3 = pmul(y, p4f_minus_cephes_DP3); x = padd(x, xmm1); x = padd(x, xmm2); x = padd(x, xmm3); /* Evaluate the first polynom (0 <= x <= Pi/4) */ y = p4f_coscof_p0; Packet4f z = pmul(x,x); y = pmadd(y,z,p4f_coscof_p1); y = pmadd(y,z,p4f_coscof_p2); y = pmul(y, z); y = pmul(y, z); Packet4f tmp = _mm_mul_ps(z, p4f_half); y = psub(y, tmp); y = padd(y, p4f_1); /* Evaluate the second polynom (Pi/4 <= x <= 0) */ Packet4f y2 = p4f_sincof_p0; y2 = pmadd(y2, z, 
p4f_sincof_p1); y2 = pmadd(y2, z, p4f_sincof_p2); y2 = pmul(y2, z); y2 = pmadd(y2, x, x); /* select the correct result from the two polynoms */ y2 = _mm_and_ps(poly_mask, y2); y = _mm_andnot_ps(poly_mask, y); y = _mm_or_ps(y,y2); /* update the sign */ return _mm_xor_ps(y, sign_bit); } #if EIGEN_FAST_MATH // Functions for sqrt. // The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step // of Newton's method, at a cost of 1-2 bits of precision as opposed to the // exact solution. It does not handle +inf, or denormalized numbers correctly. // The main advantage of this approach is not just speed, but also the fact that // it can be inlined and pipelined with other computations, further reducing its // effective latency. This is similar to Quake3's fast inverse square root. // For detail see here: http://www.beyond3d.com/content/articles/8/ template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt<Packet4f>(const Packet4f& _x) { Packet4f half = pmul(_x, pset1<Packet4f>(.5f)); Packet4f denormal_mask = _mm_and_ps( _mm_cmpge_ps(_x, _mm_setzero_ps()), _mm_cmplt_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)()))); // Compute approximate reciprocal sqrt. Packet4f x = _mm_rsqrt_ps(_x); // Do a single step of Newton's iteration. x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x)))); // Flush results for denormals to zero. return _mm_andnot_ps(denormal_mask, pmul(_x,x)); } #else template<>EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); } #endif template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); } #if EIGEN_FAST_MATH template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f prsqrt<Packet4f>(const Packet4f& _x) { _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000); _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(nan, 0x7fc00000); _EIGEN_DECLARE_CONST_Packet4f(one_point_five, 1.5f); _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5f); _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000); Packet4f neg_half = pmul(_x, p4f_minus_half); // select only the inverse sqrt of positive normal inputs (denormals are // flushed to zero and cause infs as well). Packet4f le_zero_mask = _mm_cmple_ps(_x, p4f_flt_min); Packet4f x = _mm_andnot_ps(le_zero_mask, _mm_rsqrt_ps(_x)); // Fill in NaNs and Infs for the negative/zero entries. Packet4f neg_mask = _mm_cmplt_ps(_x, _mm_setzero_ps()); Packet4f zero_mask = _mm_andnot_ps(neg_mask, le_zero_mask); Packet4f infs_and_nans = _mm_or_ps(_mm_and_ps(neg_mask, p4f_nan), _mm_and_ps(zero_mask, p4f_inf)); // Do a single step of Newton's iteration. x = pmul(x, pmadd(neg_half, pmul(x, x), p4f_one_point_five)); // Insert NaNs and Infs in all the right places. return _mm_or_ps(x, infs_and_nans); } #else template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f prsqrt<Packet4f>(const Packet4f& x) { // Unfortunately we can't use the much faster mm_rqsrt_ps since it only provides an approximation. return _mm_div_ps(pset1<Packet4f>(1.0f), _mm_sqrt_ps(x)); } #endif template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d prsqrt<Packet2d>(const Packet2d& x) { // Unfortunately we can't use the much faster mm_rqsrt_pd since it only provides an approximation. return _mm_div_pd(pset1<Packet2d>(1.0), _mm_sqrt_pd(x)); } // Hyperbolic Tangent function. 
template <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f ptanh<Packet4f>(const Packet4f& x) { return internal::generic_fast_tanh_float(x); } } // end namespace internal namespace numext { template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float sqrt(const float &x) { return internal::pfirst(internal::Packet4f(_mm_sqrt_ss(_mm_set_ss(x)))); } template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double sqrt(const double &x) { #if EIGEN_COMP_GNUC_STRICT // This works around a GCC bug generating poor code for _mm_sqrt_pd // See https://bitbucket.org/eigen/eigen/commits/14f468dba4d350d7c19c9b93072e19f7b3df563b return internal::pfirst(internal::Packet2d(__builtin_ia32_sqrtsd(_mm_set_sd(x)))); #else return internal::pfirst(internal::Packet2d(_mm_sqrt_pd(_mm_set_sd(x)))); #endif } } // end namespace numext } // end namespace Eigen #endif // EIGEN_MATH_FUNCTIONS_SSE_H
18,888
32.550622
102
h
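The EIGEN_FAST_MATH psqrt/prsqrt paths in the file above pair the hardware reciprocal-square-root approximation with one Newton-Raphson step. A minimal scalar sketch of that idea follows; it is illustrative only (not Eigen's packet code), and the helper name fast_sqrt_sketch is made up for the example.

#include <xmmintrin.h>   // _mm_set_ss, _mm_rsqrt_ss, _mm_cvtss_f32
#include <cmath>
#include <cstdio>

// Scalar version of the fast psqrt idea: y0 ~ 1/sqrt(x) from the hardware table,
// one Newton step y1 = y0*(1.5 - 0.5*x*y0*y0), then sqrt(x) ~ x*y1.
static float fast_sqrt_sketch(float x) {
  float y = _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(x)));  // roughly 12-bit approximation
  y = y * (1.5f - 0.5f * x * y * y);                     // same step as the pmul/psub chain in psqrt<Packet4f>
  return x * y;                                          // zero/denormal handling is omitted here
}

int main() {
  const float xs[] = {0.25f, 2.0f, 1.0e6f};
  for (float x : xs)
    std::printf("x=%-8g fast=%.7g exact=%.7g\n", x, fast_sqrt_sketch(x), std::sqrt(x));
  return 0;
}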
abess
abess-master/python/include/Eigen/src/Core/arch/SSE/TypeCasting.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TYPE_CASTING_SSE_H #define EIGEN_TYPE_CASTING_SSE_H namespace Eigen { namespace internal { template <> struct type_casting_traits<float, int> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) { return _mm_cvttps_epi32(a); } template <> struct type_casting_traits<int, float> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) { return _mm_cvtepi32_ps(a); } template <> struct type_casting_traits<double, float> { enum { VectorizedCast = 1, SrcCoeffRatio = 2, TgtCoeffRatio = 1 }; }; template<> EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) { return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6)); } template <> struct type_casting_traits<float, double> { enum { VectorizedCast = 1, SrcCoeffRatio = 1, TgtCoeffRatio = 2 }; }; template<> EIGEN_STRONG_INLINE Packet2d pcast<Packet4f, Packet2d>(const Packet4f& a) { // Simply discard the second half of the input return _mm_cvtps_pd(a); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_TYPE_CASTING_SSE_H
1,726
21.141026
105
h
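The pcast specializations above back the user-level .cast<T>() conversions when the packet widths match. A small usage sketch (illustrative only, assuming an SSE-enabled build so the vectorized path can be taken) showing that the float-to-int path truncates toward zero, exactly like _mm_cvttps_epi32:

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Array4f f(1.7f, -2.9f, 3.1f, -0.2f);
  Eigen::Array4i i = f.cast<int>();          // may lower to pcast<Packet4f, Packet4i>
  std::cout << i.transpose() << std::endl;   // 1 -2 3 0 : truncation toward zero
  return 0;
}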
abess
abess-master/python/include/Eigen/src/Core/arch/ZVector/MathFunctions.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2007 Julien Pommier // Copyright (C) 2009 Gael Guennebaud <[email protected]> // Copyright (C) 2016 Konstantinos Margaritis <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* The sin, cos, exp, and log functions of this file come from * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/ */ #ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H #define EIGEN_MATH_FUNCTIONS_ALTIVEC_H namespace Eigen { namespace internal { static _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0); static _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0); static _EIGEN_DECLARE_CONST_Packet2d(half, 0.5); static _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437); static _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303); static _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125); static _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6); template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp<Packet2d>(const Packet2d& _x) { Packet2d x = _x; Packet2d tmp, fx; Packet2l emm0; // clamp x x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo); /* express exp(x) as exp(g + n*log(2)) */ fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half); fx = vec_floor(fx); tmp = pmul(fx, p2d_cephes_exp_C1); Packet2d z = pmul(fx, p2d_cephes_exp_C2); x = psub(x, tmp); x = psub(x, z); Packet2d x2 = pmul(x,x); Packet2d px = p2d_cephes_exp_p0; px = pmadd(px, x2, p2d_cephes_exp_p1); px = pmadd(px, x2, p2d_cephes_exp_p2); px = pmul (px, x); Packet2d qx = p2d_cephes_exp_q0; qx = pmadd(qx, x2, p2d_cephes_exp_q1); qx = pmadd(qx, x2, p2d_cephes_exp_q2); qx = pmadd(qx, x2, p2d_cephes_exp_q3); x = pdiv(px,psub(qx,px)); x = pmadd(p2d_2,x,p2d_1); // build 2^n emm0 = vec_ctsl(fx, 0); static const Packet2l p2l_1023 = { 1023, 1023 }; static const Packet2ul p2ul_52 = { 52, 52 }; emm0 = emm0 + p2l_1023; emm0 = emm0 << reinterpret_cast<Packet2l>(p2ul_52); // Altivec's max & min operators just drop silent NaNs. Check NaNs in // inputs and return them unmodified. 
Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x)); return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x), isnumber_mask); } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp<Packet4f>(const Packet4f& x) { Packet4f res; res.v4f[0] = pexp<Packet2d>(x.v4f[0]); res.v4f[1] = pexp<Packet2d>(x.v4f[1]); return res; } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d psqrt<Packet2d>(const Packet2d& x) { return __builtin_s390_vfsqdb(x); } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt<Packet4f>(const Packet4f& x) { Packet4f res; res.v4f[0] = psqrt<Packet2d>(x.v4f[0]); res.v4f[1] = psqrt<Packet2d>(x.v4f[1]); return res; } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d prsqrt<Packet2d>(const Packet2d& x) { // Unfortunately we can't use the much faster mm_rqsrt_pd since it only provides an approximation. return pset1<Packet2d>(1.0) / psqrt<Packet2d>(x); } template<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f prsqrt<Packet4f>(const Packet4f& x) { Packet4f res; res.v4f[0] = prsqrt<Packet2d>(x.v4f[0]); res.v4f[1] = prsqrt<Packet2d>(x.v4f[1]); return res; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATH_FUNCTIONS_ALTIVEC_H
4,418
31.021739
100
h
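pexp<Packet2d> above follows the Cephes scheme: write x = g + n*log(2), approximate exp(g) = 1 + 2*g*P(g^2) / (Q(g^2) - g*P(g^2)), then scale by 2^n. Below is a scalar sketch with the same constants; it is an assumption-laden illustration (plain C++ with std::ldexp standing in for the exponent-bit manipulation, and without the exp_hi/exp_lo clamping or the NaN pass-through).

#include <cmath>
#include <cstdio>

// Scalar rendering of the Cephes-style exp() used by pexp<Packet2d> above.
static double cephes_exp_sketch(double x) {
  const double LOG2EF = 1.4426950408889634073599;
  const double C1 = 0.693145751953125, C2 = 1.42860682030941723212e-6;   // log(2) split in two parts
  const double p0 = 1.26177193074810590878e-4, p1 = 3.02994407707441961300e-2,
               p2 = 9.99999999999999999910e-1;
  const double q0 = 3.00198505138664455042e-6, q1 = 2.52448340349684104192e-3,
               q2 = 2.27265548208155028766e-1, q3 = 2.00000000000000000009e0;

  double n = std::floor(x * LOG2EF + 0.5);            // nearest integer multiple of log(2)
  double g = (x - n * C1) - n * C2;                   // reduced argument, split subtraction for accuracy
  double g2 = g * g;
  double pg = ((p0 * g2 + p1) * g2 + p2) * g;         // g * P(g^2)
  double qg = ((q0 * g2 + q1) * g2 + q2) * g2 + q3;   // Q(g^2)
  double r  = 1.0 + 2.0 * pg / (qg - pg);             // exp(g)
  return std::ldexp(r, static_cast<int>(n));          // * 2^n (the emm0 bit trick above)
}

int main() {
  const double xs[] = {-3.0, 0.5, 10.0};
  for (double x : xs)
    std::printf("x=%-5g approx=%.15g exact=%.15g\n", x, cephes_exp_sketch(x), std::exp(x));
  return 0;
}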
abess
abess-master/python/include/Eigen/src/Core/functors/StlFunctors.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STL_FUNCTORS_H #define EIGEN_STL_FUNCTORS_H namespace Eigen { namespace internal { // default functor traits for STL functors: template<typename T> struct functor_traits<std::multiplies<T> > { enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; }; template<typename T> struct functor_traits<std::divides<T> > { enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; }; template<typename T> struct functor_traits<std::plus<T> > { enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; }; template<typename T> struct functor_traits<std::minus<T> > { enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; }; template<typename T> struct functor_traits<std::negate<T> > { enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; }; template<typename T> struct functor_traits<std::logical_or<T> > { enum { Cost = 1, PacketAccess = false }; }; template<typename T> struct functor_traits<std::logical_and<T> > { enum { Cost = 1, PacketAccess = false }; }; template<typename T> struct functor_traits<std::logical_not<T> > { enum { Cost = 1, PacketAccess = false }; }; template<typename T> struct functor_traits<std::greater<T> > { enum { Cost = 1, PacketAccess = false }; }; template<typename T> struct functor_traits<std::less<T> > { enum { Cost = 1, PacketAccess = false }; }; template<typename T> struct functor_traits<std::greater_equal<T> > { enum { Cost = 1, PacketAccess = false }; }; template<typename T> struct functor_traits<std::less_equal<T> > { enum { Cost = 1, PacketAccess = false }; }; template<typename T> struct functor_traits<std::equal_to<T> > { enum { Cost = 1, PacketAccess = false }; }; template<typename T> struct functor_traits<std::not_equal_to<T> > { enum { Cost = 1, PacketAccess = false }; }; #if (__cplusplus < 201103L) && (EIGEN_COMP_MSVC <= 1900) // std::binder* are deprecated since c++11 and will be removed in c++17 template<typename T> struct functor_traits<std::binder2nd<T> > { enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; }; template<typename T> struct functor_traits<std::binder1st<T> > { enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; }; #endif template<typename T> struct functor_traits<std::unary_negate<T> > { enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; }; template<typename T> struct functor_traits<std::binary_negate<T> > { enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; }; #ifdef EIGEN_STDEXT_SUPPORT template<typename T0,typename T1> struct functor_traits<std::project1st<T0,T1> > { enum { Cost = 0, PacketAccess = false }; }; template<typename T0,typename T1> struct functor_traits<std::project2nd<T0,T1> > { enum { Cost = 0, PacketAccess = false }; }; template<typename T0,typename T1> struct functor_traits<std::select2nd<std::pair<T0,T1> > > { enum { Cost = 0, PacketAccess = false }; }; template<typename T0,typename T1> struct functor_traits<std::select1st<std::pair<T0,T1> > > { enum { Cost = 0, PacketAccess = false }; }; template<typename T0,typename T1> struct functor_traits<std::unary_compose<T0,T1> > { enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost, PacketAccess = false }; }; template<typename 
T0,typename T1,typename T2> struct functor_traits<std::binary_compose<T0,T1,T2> > { enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost + functor_traits<T2>::Cost, PacketAccess = false }; }; #endif // EIGEN_STDEXT_SUPPORT // allow adding new functors and specializations of functor_traits from outside Eigen. // this macro is really needed because functor_traits must be specialized after it is declared but before it is used... #ifdef EIGEN_FUNCTORS_PLUGIN #include EIGEN_FUNCTORS_PLUGIN #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_STL_FUNCTORS_H
4,184
30.466165
122
h
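The functor_traits specializations above are what let the std:: functors participate in Eigen expressions, for example through binaryExpr(); the Cost/PacketAccess fields only feed the evaluator's cost model and keep those functors on the scalar path. A small usage sketch, illustrative only:

#include <Eigen/Core>
#include <functional>
#include <iostream>

int main() {
  Eigen::Array3d a(1.0, 2.0, 3.0), b(10.0, 20.0, 30.0);
  // functor_traits<std::plus<double> > above supplies the cost model for this expression.
  Eigen::Array3d sum = a.binaryExpr(b, std::plus<double>());
  std::cout << sum.transpose() << std::endl;   // 11 22 33
  return 0;
}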
abess
abess-master/python/include/Eigen/src/Core/functors/TernaryFunctors.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Eugene Brevdo <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TERNARY_FUNCTORS_H #define EIGEN_TERNARY_FUNCTORS_H namespace Eigen { namespace internal { //---------- associative ternary functors ---------- } // end namespace internal } // end namespace Eigen #endif // EIGEN_TERNARY_FUNCTORS_H
607
22.384615
69
h
abess
abess-master/python/include/Eigen/src/Core/products/Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H namespace Eigen { namespace internal { /** \internal */ inline void manage_multi_threading(Action action, int* v) { static EIGEN_UNUSED int m_maxThreads = -1; if(action==SetAction) { eigen_internal_assert(v!=0); m_maxThreads = *v; } else if(action==GetAction) { eigen_internal_assert(v!=0); #ifdef EIGEN_HAS_OPENMP if(m_maxThreads>0) *v = m_maxThreads; else *v = omp_get_max_threads(); #else *v = 1; #endif } else { eigen_internal_assert(false); } } } /** Must be call first when calling Eigen from multiple threads */ inline void initParallel() { int nbt; internal::manage_multi_threading(GetAction, &nbt); std::ptrdiff_t l1, l2, l3; internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); } /** \returns the max number of threads reserved for Eigen * \sa setNbThreads */ inline int nbThreads() { int ret; internal::manage_multi_threading(GetAction, &ret); return ret; } /** Sets the max number of threads reserved for Eigen * \sa nbThreads */ inline void setNbThreads(int v) { internal::manage_multi_threading(SetAction, &v); } namespace internal { template<typename Index> struct GemmParallelInfo { GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {} Index volatile sync; int volatile users; Index lhs_start; Index lhs_length; }; template<bool Condition, typename Functor, typename Index> void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose) { // TODO when EIGEN_USE_BLAS is defined, // we should still enable OMP for other scalar types #if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS) // FIXME the transpose variable is only needed to properly split // the matrix product when multithreading is enabled. This is a temporary // fix to support row-major destination matrices. This whole // parallelizer mechanism has to be redisigned anyway. EIGEN_UNUSED_VARIABLE(depth); EIGEN_UNUSED_VARIABLE(transpose); func(0,rows, 0,cols); #else // Dynamically check whether we should enable or disable OpenMP. // The conditions are: // - the max number of threads we can create is greater than 1 // - we are not already in a parallel code // - the sizes are large enough // compute the maximal number of threads from the size of the product: // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once. Index size = transpose ? rows : cols; Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr); // compute the maximal number of threads from the total amount of work: double work = static_cast<double>(rows) * static_cast<double>(cols) * static_cast<double>(depth); double kMinTaskSize = 50000; // FIXME improve this heuristic. pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize)); // compute the number of threads we are going to use Index threads = std::min<Index>(nbThreads(), pb_max_threads); // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session, // then abort multi-threading // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp? 
if((!Condition) || (threads==1) || (omp_get_num_threads()>1)) return func(0,rows, 0,cols); Eigen::initParallel(); func.initParallelSession(threads); if(transpose) std::swap(rows,cols); ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0); #pragma omp parallel num_threads(threads) { Index i = omp_get_thread_num(); // Note that the actual number of threads might be lower than the number of requested ones. Index actual_threads = omp_get_num_threads(); Index blockCols = (cols / actual_threads) & ~Index(0x3); Index blockRows = (rows / actual_threads); blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr; Index r0 = i*blockRows; Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows; Index c0 = i*blockCols; Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols; info[i].lhs_start = r0; info[i].lhs_length = actualBlockRows; if(transpose) func(c0, actualBlockCols, 0, rows, info); else func(0, rows, c0, actualBlockCols, info); } #endif } } // end namespace internal } // end namespace Eigen #endif // EIGEN_PARALLELIZER_H
4,905
28.914634
125
h
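initParallel(), setNbThreads() and nbThreads() above are the whole user-visible threading interface around parallelize_gemm(). A short usage sketch, assuming an OpenMP build (compiled with -fopenmp); without OpenMP, nbThreads() simply reports 1 and the product runs single-threaded:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::initParallel();     // required before calling Eigen from multiple threads
  Eigen::setNbThreads(4);    // cap on the threads parallelize_gemm() may use
  std::cout << "Eigen may use up to " << Eigen::nbThreads() << " threads" << std::endl;

  // A product large enough for the work / kMinTaskSize heuristic to actually enable threading.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(512, 512);
  Eigen::MatrixXd C = A * B;
  std::cout << "C(0,0) = " << C(0, 0) << std::endl;
  return 0;
}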
abess
abess-master/python/include/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to BLAS F77 * Triangular matrix * matrix product functionality based on ?TRMM. ******************************************************************************** */ #ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H #define EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H namespace Eigen { namespace internal { template <typename Scalar, typename Index, int Mode, bool LhsIsTriangular, int LhsStorageOrder, bool ConjugateLhs, int RhsStorageOrder, bool ConjugateRhs, int ResStorageOrder> struct product_triangular_matrix_matrix_trmm : product_triangular_matrix_matrix<Scalar,Index,Mode, LhsIsTriangular,LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ResStorageOrder, BuiltIn> {}; // try to go to BLAS specialization #define EIGEN_BLAS_TRMM_SPECIALIZE(Scalar, LhsIsTriangular) \ template <typename Index, int Mode, \ int LhsStorageOrder, bool ConjugateLhs, \ int RhsStorageOrder, bool ConjugateRhs> \ struct product_triangular_matrix_matrix<Scalar,Index, Mode, LhsIsTriangular, \ LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,Specialized> { \ static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\ const Scalar* _rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar,Scalar>& blocking) { \ product_triangular_matrix_matrix_trmm<Scalar,Index,Mode, \ LhsIsTriangular,LhsStorageOrder,ConjugateLhs, \ RhsStorageOrder, ConjugateRhs, ColMajor>::run( \ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \ } \ }; EIGEN_BLAS_TRMM_SPECIALIZE(double, true) EIGEN_BLAS_TRMM_SPECIALIZE(double, false) EIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, true) EIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, false) EIGEN_BLAS_TRMM_SPECIALIZE(float, true) EIGEN_BLAS_TRMM_SPECIALIZE(float, false) EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, true) EIGEN_BLAS_TRMM_SPECIALIZE(scomplex, false) // implements col-major += alpha * op(triangular) * op(general) #define 
EIGEN_BLAS_TRMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \ template <typename Index, int Mode, \ int LhsStorageOrder, bool ConjugateLhs, \ int RhsStorageOrder, bool ConjugateRhs> \ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \ LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \ { \ enum { \ IsLower = (Mode&Lower) == Lower, \ SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ LowUp = IsLower ? Lower : Upper, \ conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \ }; \ \ static void run( \ Index _rows, Index _cols, Index _depth, \ const EIGTYPE* _lhs, Index lhsStride, \ const EIGTYPE* _rhs, Index rhsStride, \ EIGTYPE* res, Index resStride, \ EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \ { \ Index diagSize = (std::min)(_rows,_depth); \ Index rows = IsLower ? _rows : diagSize; \ Index depth = IsLower ? diagSize : _depth; \ Index cols = _cols; \ \ typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \ typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \ \ /* Non-square case - doesn't fit to BLAS ?TRMM. Fall to default triangular product or call BLAS ?GEMM*/ \ if (rows != depth) { \ \ /* FIXME handle mkl_domain_get_max_threads */ \ /*int nthr = mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS);*/ int nthr = 1;\ \ if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \ /* Most likely no benefit to call TRMM or GEMM from BLAS */ \ product_triangular_matrix_matrix<EIGTYPE,Index,Mode,true, \ LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \ /*std::cout << "TRMM_L: A is not square! Go to Eigen TRMM implementation!\n";*/ \ } else { \ /* Make sense to call GEMM */ \ Map<const MatrixLhs, 0, OuterStride<> > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \ MatrixLhs aa_tmp=lhsMap.template triangularView<Mode>(); \ BlasIndex aStride = convert_index<BlasIndex>(aa_tmp.outerStride()); \ gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth, 1, true); \ general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \ rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, gemm_blocking, 0); \ \ /*std::cout << "TRMM_L: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \ } \ return; \ } \ char side = 'L', transa, uplo, diag = 'N'; \ EIGTYPE *b; \ const EIGTYPE *a; \ BlasIndex m, n, lda, ldb; \ \ /* Set m, n */ \ m = convert_index<BlasIndex>(diagSize); \ n = convert_index<BlasIndex>(cols); \ \ /* Set trans */ \ transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \ \ /* Set b, ldb */ \ Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols,OuterStride<>(rhsStride)); \ MatrixX##EIGPREFIX b_tmp; \ \ if (ConjugateRhs) b_tmp = rhs.conjugate(); else b_tmp = rhs; \ b = b_tmp.data(); \ ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \ \ /* Set uplo */ \ uplo = IsLower ? 'L' : 'U'; \ if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 
'U' : 'L'; \ /* Set a, lda */ \ Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \ MatrixLhs a_tmp; \ \ if ((conjA!=0) || (SetDiag==0)) { \ if (conjA) a_tmp = lhs.conjugate(); else a_tmp = lhs; \ if (IsZeroDiag) \ a_tmp.diagonal().setZero(); \ else if (IsUnitDiag) \ a_tmp.diagonal().setOnes();\ a = a_tmp.data(); \ lda = convert_index<BlasIndex>(a_tmp.outerStride()); \ } else { \ a = _lhs; \ lda = convert_index<BlasIndex>(lhsStride); \ } \ /*std::cout << "TRMM_L: A is square! Go to BLAS TRMM implementation! \n";*/ \ /* call ?trmm*/ \ BLASPREFIX##trmm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \ \ /* Add op(a_triangular)*b into res*/ \ Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \ res_tmp=res_tmp+b_tmp; \ } \ }; EIGEN_BLAS_TRMM_L(double, double, d, d) EIGEN_BLAS_TRMM_L(dcomplex, double, cd, z) EIGEN_BLAS_TRMM_L(float, float, f, s) EIGEN_BLAS_TRMM_L(scomplex, float, cf, c) // implements col-major += alpha * op(general) * op(triangular) #define EIGEN_BLAS_TRMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \ template <typename Index, int Mode, \ int LhsStorageOrder, bool ConjugateLhs, \ int RhsStorageOrder, bool ConjugateRhs> \ struct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \ LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \ { \ enum { \ IsLower = (Mode&Lower) == Lower, \ SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \ IsUnitDiag = (Mode&UnitDiag) ? 1 : 0, \ IsZeroDiag = (Mode&ZeroDiag) ? 1 : 0, \ LowUp = IsLower ? Lower : Upper, \ conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \ }; \ \ static void run( \ Index _rows, Index _cols, Index _depth, \ const EIGTYPE* _lhs, Index lhsStride, \ const EIGTYPE* _rhs, Index rhsStride, \ EIGTYPE* res, Index resStride, \ EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \ { \ Index diagSize = (std::min)(_cols,_depth); \ Index rows = _rows; \ Index depth = IsLower ? _depth : diagSize; \ Index cols = IsLower ? diagSize : _cols; \ \ typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \ typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \ \ /* Non-square case - doesn't fit to BLAS ?TRMM. Fall to default triangular product or call BLAS ?GEMM*/ \ if (cols != depth) { \ \ int nthr = 1 /*mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS)*/; \ \ if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \ /* Most likely no benefit to call TRMM or GEMM from BLAS*/ \ product_triangular_matrix_matrix<EIGTYPE,Index,Mode,false, \ LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \ _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \ /*std::cout << "TRMM_R: A is not square! 
Go to Eigen TRMM implementation!\n";*/ \ } else { \ /* Make sense to call GEMM */ \ Map<const MatrixRhs, 0, OuterStride<> > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \ MatrixRhs aa_tmp=rhsMap.template triangularView<Mode>(); \ BlasIndex aStride = convert_index<BlasIndex>(aa_tmp.outerStride()); \ gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth, 1, true); \ general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \ rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, gemm_blocking, 0); \ \ /*std::cout << "TRMM_R: A is not square! Go to BLAS GEMM implementation! " << nthr<<" \n";*/ \ } \ return; \ } \ char side = 'R', transa, uplo, diag = 'N'; \ EIGTYPE *b; \ const EIGTYPE *a; \ BlasIndex m, n, lda, ldb; \ \ /* Set m, n */ \ m = convert_index<BlasIndex>(rows); \ n = convert_index<BlasIndex>(diagSize); \ \ /* Set trans */ \ transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 'C' : 'T') : 'N'; \ \ /* Set b, ldb */ \ Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \ MatrixX##EIGPREFIX b_tmp; \ \ if (ConjugateLhs) b_tmp = lhs.conjugate(); else b_tmp = lhs; \ b = b_tmp.data(); \ ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \ \ /* Set uplo */ \ uplo = IsLower ? 'L' : 'U'; \ if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \ /* Set a, lda */ \ Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols, OuterStride<>(rhsStride)); \ MatrixRhs a_tmp; \ \ if ((conjA!=0) || (SetDiag==0)) { \ if (conjA) a_tmp = rhs.conjugate(); else a_tmp = rhs; \ if (IsZeroDiag) \ a_tmp.diagonal().setZero(); \ else if (IsUnitDiag) \ a_tmp.diagonal().setOnes();\ a = a_tmp.data(); \ lda = convert_index<BlasIndex>(a_tmp.outerStride()); \ } else { \ a = _rhs; \ lda = convert_index<BlasIndex>(rhsStride); \ } \ /*std::cout << "TRMM_R: A is square! Go to BLAS TRMM implementation! \n";*/ \ /* call ?trmm*/ \ BLASPREFIX##trmm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \ \ /* Add op(a_triangular)*b into res*/ \ Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \ res_tmp=res_tmp+b_tmp; \ } \ }; EIGEN_BLAS_TRMM_R(double, double, d, d) EIGEN_BLAS_TRMM_R(dcomplex, double, cd, z) EIGEN_BLAS_TRMM_R(float, float, f, s) EIGEN_BLAS_TRMM_R(scomplex, float, cf, c) } // end namespace internal } // end namespace Eigen #endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H
13,238
42.693069
134
h
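The macros above only change how an expression like the one below is executed: with EIGEN_USE_BLAS defined at build time (and a BLAS library linked) the square triangular-factor case may be routed to ?trmm, otherwise Eigen's built-in triangular kernel runs. Illustrative usage:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);   // triangular factor is square here
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 2);
  // res = op(triangular) * op(general); with EIGEN_USE_BLAS this may go through ?trmm.
  Eigen::MatrixXd C = A.triangularView<Eigen::Lower>() * B;
  std::cout << C << std::endl;
  return 0;
}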
abess
abess-master/python/include/Eigen/src/Core/products/TriangularSolverVector.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H #define EIGEN_TRIANGULAR_SOLVER_VECTOR_H namespace Eigen { namespace internal { template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate, int StorageOrder> struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheRight, Mode, Conjugate, StorageOrder> { static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) { triangular_solve_vector<LhsScalar,RhsScalar,Index,OnTheLeft, ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag), Conjugate,StorageOrder==RowMajor?ColMajor:RowMajor >::run(size, _lhs, lhsStride, rhs); } }; // forward and backward substitution, row-major, rhs is a vector template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate> struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, RowMajor> { enum { IsLower = ((Mode&Lower)==Lower) }; static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) { typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap; const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride)); typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper; typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper; typename internal::conditional< Conjugate, const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>, const LhsMap&> ::type cjLhs(lhs); static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; for(Index pi=IsLower ? 0 : size; IsLower ? pi<size : pi>0; IsLower ? pi+=PanelWidth : pi-=PanelWidth) { Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth); Index r = IsLower ? pi : size - pi; // remaining size if (r > 0) { // let's directly call the low level product function because: // 1 - it is faster to compile // 2 - it is slighlty faster at runtime Index startRow = IsLower ? pi : pi-actualPanelWidth; Index startCol = IsLower ? 0 : pi; general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,Conjugate,RhsScalar,RhsMapper,false>::run( actualPanelWidth, r, LhsMapper(&lhs.coeffRef(startRow,startCol), lhsStride), RhsMapper(rhs + startCol, 1), rhs + startRow, 1, RhsScalar(-1)); } for(Index k=0; k<actualPanelWidth; ++k) { Index i = IsLower ? pi+k : pi-k-1; Index s = IsLower ? 
pi : i+1; if (k>0) rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map<const Matrix<RhsScalar,Dynamic,1> >(rhs+s,k))).sum(); if(!(Mode & UnitDiag)) rhs[i] /= cjLhs(i,i); } } } }; // forward and backward substitution, column-major, rhs is a vector template<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate> struct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, ColMajor> { enum { IsLower = ((Mode&Lower)==Lower) }; static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs) { typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap; const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride)); typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper; typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper; typename internal::conditional<Conjugate, const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>, const LhsMap& >::type cjLhs(lhs); static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH; for(Index pi=IsLower ? 0 : size; IsLower ? pi<size : pi>0; IsLower ? pi+=PanelWidth : pi-=PanelWidth) { Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth); Index startBlock = IsLower ? pi : pi-actualPanelWidth; Index endBlock = IsLower ? pi + actualPanelWidth : 0; for(Index k=0; k<actualPanelWidth; ++k) { Index i = IsLower ? pi+k : pi-k-1; if(!(Mode & UnitDiag)) rhs[i] /= cjLhs.coeff(i,i); Index r = actualPanelWidth - k - 1; // remaining size Index s = IsLower ? i+1 : i-r; if (r>0) Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r); } Index r = IsLower ? size - endBlock : startBlock; // remaining size if (r > 0) { // let's directly call the low level product function because: // 1 - it is faster to compile // 2 - it is slighlty faster at runtime general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,Conjugate,RhsScalar,RhsMapper,false>::run( r, actualPanelWidth, LhsMapper(&lhs.coeffRef(endBlock,startBlock), lhsStride), RhsMapper(rhs+startBlock, 1), rhs+endBlock, 1, RhsScalar(-1)); } } } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_TRIANGULAR_SOLVER_VECTOR_H
5,741
38.328767
129
h
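Stripped of the panel blocking and the embedded gemv call, the OnTheLeft lower-triangular kernel above is plain forward substitution performed in place on rhs. A scalar sketch of that core loop follows; the helper name is hypothetical and the well-conditioning tweak is just for the example.

#include <Eigen/Dense>
#include <iostream>

// Forward substitution for L x = b, overwriting rhs with x, as the row-major kernel does.
static void lower_solve_in_place_sketch(const Eigen::MatrixXd& L, Eigen::VectorXd& rhs) {
  const Eigen::Index n = L.rows();
  for (Eigen::Index i = 0; i < n; ++i) {
    if (i > 0)
      rhs(i) -= L.row(i).head(i).dot(rhs.head(i)); // the cjLhs.row(i).segment(s,k) dot product
    rhs(i) /= L(i, i);                             // skipped by the kernel when Mode & UnitDiag
  }
}

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  Eigen::MatrixXd L = A.triangularView<Eigen::Lower>();
  L.diagonal().array() += 5.0;                     // keep the system well conditioned
  Eigen::VectorXd b = Eigen::VectorXd::Random(5);
  Eigen::VectorXd x = b;
  lower_solve_in_place_sketch(L, x);
  std::cout << "residual = " << (L * x - b).norm() << std::endl;  // ~0
  return 0;
}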
abess
abess-master/python/include/Eigen/src/Core/util/MKL_support.h
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to Intel(R) MKL * Include file with common MKL declarations ******************************************************************************** */ #ifndef EIGEN_MKL_SUPPORT_H #define EIGEN_MKL_SUPPORT_H #ifdef EIGEN_USE_MKL_ALL #ifndef EIGEN_USE_BLAS #define EIGEN_USE_BLAS #endif #ifndef EIGEN_USE_LAPACKE #define EIGEN_USE_LAPACKE #endif #ifndef EIGEN_USE_MKL_VML #define EIGEN_USE_MKL_VML #endif #endif #ifdef EIGEN_USE_LAPACKE_STRICT #define EIGEN_USE_LAPACKE #endif #if defined(EIGEN_USE_MKL_VML) #define EIGEN_USE_MKL #endif #if defined EIGEN_USE_MKL # include <mkl.h> /*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/ # ifndef INTEL_MKL_VERSION # undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */ # elif INTEL_MKL_VERSION < 100305 /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/ # undef EIGEN_USE_MKL # endif # ifndef EIGEN_USE_MKL /*If the MKL version is too old, undef everything*/ # undef EIGEN_USE_MKL_ALL # undef EIGEN_USE_LAPACKE # undef EIGEN_USE_MKL_VML # undef EIGEN_USE_LAPACKE_STRICT # undef EIGEN_USE_LAPACKE # endif #endif #if defined EIGEN_USE_MKL #define EIGEN_MKL_VML_THRESHOLD 128 /* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */ /* MKL_BLAS, etc are not defined in 11.2 */ #ifdef MKL_DOMAIN_ALL #define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL #else #define EIGEN_MKL_DOMAIN_ALL MKL_ALL #endif #ifdef MKL_DOMAIN_BLAS #define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS #else #define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS #endif #ifdef MKL_DOMAIN_FFT #define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT #else #define EIGEN_MKL_DOMAIN_FFT MKL_FFT #endif #ifdef MKL_DOMAIN_VML #define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML #else #define EIGEN_MKL_DOMAIN_VML MKL_VML #endif #ifdef MKL_DOMAIN_PARDISO #define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO #else #define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO #endif #endif namespace Eigen { typedef std::complex<double> 
dcomplex; typedef std::complex<float> scomplex; #if defined(EIGEN_USE_MKL) typedef MKL_INT BlasIndex; #else typedef int BlasIndex; #endif } // end namespace Eigen #if defined(EIGEN_USE_BLAS) #include "../../misc/blas.h" #endif #endif // EIGEN_MKL_SUPPORT_H
3,970
29.782946
127
h
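A small sketch of how the switches above are meant to be driven from a client build; the assumption is that the macros come from the compiler command line (for example -DEIGEN_USE_MKL_ALL together with MKL's include and link flags), and the program only reports which path ended up configured:

#include <Eigen/Dense>
#include <iostream>

int main() {
#if defined(EIGEN_USE_MKL)
  std::cout << "MKL kernels enabled: BlasIndex is MKL_INT" << std::endl;
#elif defined(EIGEN_USE_BLAS)
  std::cout << "External BLAS enabled: BlasIndex is int" << std::endl;
#else
  std::cout << "Using Eigen's built-in kernels" << std::endl;
#endif
  std::cout << "sizeof(Eigen::BlasIndex) = " << sizeof(Eigen::BlasIndex) << std::endl;
  return 0;
}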
abess
abess-master/python/include/Eigen/src/Core/util/ReenableStupidWarnings.h
#ifdef EIGEN_WARNINGS_DISABLED #undef EIGEN_WARNINGS_DISABLED #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS #ifdef _MSC_VER #pragma warning( pop ) #elif defined __INTEL_COMPILER #pragma warning pop #elif defined __clang__ #pragma clang diagnostic pop #elif defined __GNUC__ && __GNUC__>=6 #pragma GCC diagnostic pop #endif #if defined __NVCC__ // Don't reenable the diagnostic messages, as it turns out these messages need // to be disabled at the point of the template instantiation (i.e. the user code) // otherwise they'll be triggered by nvcc. // #pragma diag_default code_is_unreachable // #pragma diag_default initialization_not_reachable // #pragma diag_default 2651 // #pragma diag_default 2653 #endif #endif #endif // EIGEN_WARNINGS_DISABLED
809
27.928571
83
h
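This header is the closing half of a push/pop pair: DisableStupidWarnings.h pushes the diagnostic state and silences selected warnings, and the file above pops it so user code keeps its own warning settings. A stand-alone sketch of the same pattern (GCC/Clang pragma syntax shown; MSVC uses #pragma warning(push/pop), and the warning only fires if -Wunused-parameter or -Wextra is enabled):

#include <iostream>

#if defined(__GNUC__) || defined(__clang__)
#  pragma GCC diagnostic push                          // plays the role of DisableStupidWarnings.h
#  pragma GCC diagnostic ignored "-Wunused-parameter"
#endif

// Would trigger -Wunused-parameter if the warning were not suppressed above.
static int header_like_code(int unused_flag) { return 42; }

#if defined(__GNUC__) || defined(__clang__)
#  pragma GCC diagnostic pop                           // plays the role of ReenableStupidWarnings.h
#endif

int main() {
  std::cout << header_like_code(0) << std::endl;
  return 0;
}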
abess
abess-master/python/include/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2010 Gael Guennebaud <[email protected]> // Copyright (C) 2010 Jitse Niesen <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H #define EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H #include "./Tridiagonalization.h" namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class GeneralizedSelfAdjointEigenSolver * * \brief Computes eigenvalues and eigenvectors of the generalized selfadjoint eigen problem * * \tparam _MatrixType the type of the matrix of which we are computing the * eigendecomposition; this is expected to be an instantiation of the Matrix * class template. * * This class solves the generalized eigenvalue problem * \f$ Av = \lambda Bv \f$. In this case, the matrix \f$ A \f$ should be * selfadjoint and the matrix \f$ B \f$ should be positive definite. * * Only the \b lower \b triangular \b part of the input matrix is referenced. * * Call the function compute() to compute the eigenvalues and eigenvectors of * a given matrix. Alternatively, you can use the * GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int) * constructor which computes the eigenvalues and eigenvectors at construction time. * Once the eigenvalue and eigenvectors are computed, they can be retrieved with the eigenvalues() * and eigenvectors() functions. * * The documentation for GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int) * contains an example of the typical use of this class. * * \sa class SelfAdjointEigenSolver, class EigenSolver, class ComplexEigenSolver */ template<typename _MatrixType> class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixType> { typedef SelfAdjointEigenSolver<_MatrixType> Base; public: typedef _MatrixType MatrixType; /** \brief Default constructor for fixed-size matrices. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). This constructor * can only be used if \p _MatrixType is a fixed-size matrix; use * GeneralizedSelfAdjointEigenSolver(Index) for dynamic-size matrices. */ GeneralizedSelfAdjointEigenSolver() : Base() {} /** \brief Constructor, pre-allocates memory for dynamic-size matrices. * * \param [in] size Positive integer, size of the matrix whose * eigenvalues and eigenvectors will be computed. * * This constructor is useful for dynamic-size matrices, when the user * intends to perform decompositions via compute(). The \p size * parameter is only used as a hint. It is not an error to give a wrong * \p size, but it may impair performance. * * \sa compute() for an example */ explicit GeneralizedSelfAdjointEigenSolver(Index size) : Base(size) {} /** \brief Constructor; computes generalized eigendecomposition of given matrix pencil. * * \param[in] matA Selfadjoint matrix in matrix pencil. * Only the lower triangular part of the matrix is referenced. * \param[in] matB Positive-definite matrix in matrix pencil. * Only the lower triangular part of the matrix is referenced. * \param[in] options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}. * Default is #ComputeEigenvectors|#Ax_lBx. 
* * This constructor calls compute(const MatrixType&, const MatrixType&, int) * to compute the eigenvalues and (if requested) the eigenvectors of the * generalized eigenproblem \f$ Ax = \lambda B x \f$ with \a matA the * selfadjoint matrix \f$ A \f$ and \a matB the positive definite matrix * \f$ B \f$. Each eigenvector \f$ x \f$ satisfies the property * \f$ x^* B x = 1 \f$. The eigenvectors are computed if * \a options contains ComputeEigenvectors. * * In addition, the two following variants can be solved via \p options: * - \c ABx_lx: \f$ ABx = \lambda x \f$ * - \c BAx_lx: \f$ BAx = \lambda x \f$ * * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out * * \sa compute(const MatrixType&, const MatrixType&, int) */ GeneralizedSelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, int options = ComputeEigenvectors|Ax_lBx) : Base(matA.cols()) { compute(matA, matB, options); } /** \brief Computes generalized eigendecomposition of given matrix pencil. * * \param[in] matA Selfadjoint matrix in matrix pencil. * Only the lower triangular part of the matrix is referenced. * \param[in] matB Positive-definite matrix in matrix pencil. * Only the lower triangular part of the matrix is referenced. * \param[in] options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}. * Default is #ComputeEigenvectors|#Ax_lBx. * * \returns Reference to \c *this * * Accoring to \p options, this function computes eigenvalues and (if requested) * the eigenvectors of one of the following three generalized eigenproblems: * - \c Ax_lBx: \f$ Ax = \lambda B x \f$ * - \c ABx_lx: \f$ ABx = \lambda x \f$ * - \c BAx_lx: \f$ BAx = \lambda x \f$ * with \a matA the selfadjoint matrix \f$ A \f$ and \a matB the positive definite * matrix \f$ B \f$. * In addition, each eigenvector \f$ x \f$ satisfies the property \f$ x^* B x = 1 \f$. * * The eigenvalues() function can be used to retrieve * the eigenvalues. If \p options contains ComputeEigenvectors, then the * eigenvectors are also computed and can be retrieved by calling * eigenvectors(). * * The implementation uses LLT to compute the Cholesky decomposition * \f$ B = LL^* \f$ and computes the classical eigendecomposition * of the selfadjoint matrix \f$ L^{-1} A (L^*)^{-1} \f$ if \p options contains Ax_lBx * and of \f$ L^{*} A L \f$ otherwise. This solves the * generalized eigenproblem, because any solution of the generalized * eigenproblem \f$ Ax = \lambda B x \f$ corresponds to a solution * \f$ L^{-1} A (L^*)^{-1} (L^* x) = \lambda (L^* x) \f$ of the * eigenproblem for \f$ L^{-1} A (L^*)^{-1} \f$. Similar statements * can be made for the two other variants. 
* * Example: \include SelfAdjointEigenSolver_compute_MatrixType2.cpp * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType2.out * * \sa GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int) */ GeneralizedSelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB, int options = ComputeEigenvectors|Ax_lBx); protected: }; template<typename MatrixType> GeneralizedSelfAdjointEigenSolver<MatrixType>& GeneralizedSelfAdjointEigenSolver<MatrixType>:: compute(const MatrixType& matA, const MatrixType& matB, int options) { eigen_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows()); eigen_assert((options&~(EigVecMask|GenEigMask))==0 && (options&EigVecMask)!=EigVecMask && ((options&GenEigMask)==0 || (options&GenEigMask)==Ax_lBx || (options&GenEigMask)==ABx_lx || (options&GenEigMask)==BAx_lx) && "invalid option parameter"); bool computeEigVecs = ((options&EigVecMask)==0) || ((options&EigVecMask)==ComputeEigenvectors); // Compute the cholesky decomposition of matB = L L' = U'U LLT<MatrixType> cholB(matB); int type = (options&GenEigMask); if(type==0) type = Ax_lBx; if(type==Ax_lBx) { // compute C = inv(L) A inv(L') MatrixType matC = matA.template selfadjointView<Lower>(); cholB.matrixL().template solveInPlace<OnTheLeft>(matC); cholB.matrixU().template solveInPlace<OnTheRight>(matC); Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly ); // transform back the eigen vectors: evecs = inv(U) * evecs if(computeEigVecs) cholB.matrixU().solveInPlace(Base::m_eivec); } else if(type==ABx_lx) { // compute C = L' A L MatrixType matC = matA.template selfadjointView<Lower>(); matC = matC * cholB.matrixL(); matC = cholB.matrixU() * matC; Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly); // transform back the eigen vectors: evecs = inv(U) * evecs if(computeEigVecs) cholB.matrixU().solveInPlace(Base::m_eivec); } else if(type==BAx_lx) { // compute C = L' A L MatrixType matC = matA.template selfadjointView<Lower>(); matC = matC * cholB.matrixL(); matC = cholB.matrixU() * matC; Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly); // transform back the eigen vectors: evecs = L * evecs if(computeEigVecs) Base::m_eivec = cholB.matrixL() * Base::m_eivec; } return *this; } } // end namespace Eigen #endif // EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H
9,715
41.801762
117
h
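A minimal usage sketch for the GeneralizedSelfAdjointEigenSolver listed above. It is not part of the abess sources; the matrix size, the random pencil (A made selfadjoint, B forced positive definite) and the printed residual are illustrative only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  const int n = 4;
  MatrixXd X = MatrixXd::Random(n, n);
  MatrixXd A = X + X.transpose();                                 // selfadjoint A
  MatrixXd Y = MatrixXd::Random(n, n);
  MatrixXd B = Y * Y.transpose() + n * MatrixXd::Identity(n, n);  // positive definite B

  // Default options: ComputeEigenvectors | Ax_lBx.
  GeneralizedSelfAdjointEigenSolver<MatrixXd> es(A, B);
  std::cout << "eigenvalues:\n" << es.eigenvalues() << "\n";

  // Each eigenvector x satisfies A x = lambda B x and x^T B x = 1.
  VectorXd x0 = es.eigenvectors().col(0);
  double l0 = es.eigenvalues()(0);
  std::cout << "residual |A x - lambda B x| = " << (A * x0 - l0 * B * x0).norm() << "\n";
}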
abess
abess-master/python/include/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <[email protected]> // Copyright (C) 2010 Jitse Niesen <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MATRIXBASEEIGENVALUES_H #define EIGEN_MATRIXBASEEIGENVALUES_H namespace Eigen { namespace internal { template<typename Derived, bool IsComplex> struct eigenvalues_selector { // this is the implementation for the case IsComplex = true static inline typename MatrixBase<Derived>::EigenvaluesReturnType const run(const MatrixBase<Derived>& m) { typedef typename Derived::PlainObject PlainObject; PlainObject m_eval(m); return ComplexEigenSolver<PlainObject>(m_eval, false).eigenvalues(); } }; template<typename Derived> struct eigenvalues_selector<Derived, false> { static inline typename MatrixBase<Derived>::EigenvaluesReturnType const run(const MatrixBase<Derived>& m) { typedef typename Derived::PlainObject PlainObject; PlainObject m_eval(m); return EigenSolver<PlainObject>(m_eval, false).eigenvalues(); } }; } // end namespace internal /** \brief Computes the eigenvalues of a matrix * \returns Column vector containing the eigenvalues. * * \eigenvalues_module * This function computes the eigenvalues with the help of the EigenSolver * class (for real matrices) or the ComplexEigenSolver class (for complex * matrices). * * The eigenvalues are repeated according to their algebraic multiplicity, * so there are as many eigenvalues as rows in the matrix. * * The SelfAdjointView class provides a better algorithm for selfadjoint * matrices. * * Example: \include MatrixBase_eigenvalues.cpp * Output: \verbinclude MatrixBase_eigenvalues.out * * \sa EigenSolver::eigenvalues(), ComplexEigenSolver::eigenvalues(), * SelfAdjointView::eigenvalues() */ template<typename Derived> inline typename MatrixBase<Derived>::EigenvaluesReturnType MatrixBase<Derived>::eigenvalues() const { typedef typename internal::traits<Derived>::Scalar Scalar; return internal::eigenvalues_selector<Derived, NumTraits<Scalar>::IsComplex>::run(derived()); } /** \brief Computes the eigenvalues of a matrix * \returns Column vector containing the eigenvalues. * * \eigenvalues_module * This function computes the eigenvalues with the help of the * SelfAdjointEigenSolver class. The eigenvalues are repeated according to * their algebraic multiplicity, so there are as many eigenvalues as rows in * the matrix. * * Example: \include SelfAdjointView_eigenvalues.cpp * Output: \verbinclude SelfAdjointView_eigenvalues.out * * \sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues() */ template<typename MatrixType, unsigned int UpLo> inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType SelfAdjointView<MatrixType, UpLo>::eigenvalues() const { typedef typename SelfAdjointView<MatrixType, UpLo>::PlainObject PlainObject; PlainObject thisAsMatrix(*this); return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues(); } /** \brief Computes the L2 operator norm * \returns Operator norm of the matrix. * * \eigenvalues_module * This function computes the L2 operator norm of a matrix, which is also * known as the spectral norm. 
The norm of a matrix \f$ A \f$ is defined to be * \f[ \|A\|_2 = \max_x \frac{\|Ax\|_2}{\|x\|_2} \f] * where the maximum is over all vectors and the norm on the right is the * Euclidean vector norm. The norm equals the largest singular value, which is * the square root of the largest eigenvalue of the positive semi-definite * matrix \f$ A^*A \f$. * * The current implementation uses the eigenvalues of \f$ A^*A \f$, as computed * by SelfAdjointView::eigenvalues(), to compute the operator norm of a * matrix. The SelfAdjointView class provides a better algorithm for * selfadjoint matrices. * * Example: \include MatrixBase_operatorNorm.cpp * Output: \verbinclude MatrixBase_operatorNorm.out * * \sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm() */ template<typename Derived> inline typename MatrixBase<Derived>::RealScalar MatrixBase<Derived>::operatorNorm() const { using std::sqrt; typename Derived::PlainObject m_eval(derived()); // FIXME if it is really guaranteed that the eigenvalues are already sorted, // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough. return sqrt((m_eval*m_eval.adjoint()) .eval() .template selfadjointView<Lower>() .eigenvalues() .maxCoeff() ); } /** \brief Computes the L2 operator norm * \returns Operator norm of the matrix. * * \eigenvalues_module * This function computes the L2 operator norm of a self-adjoint matrix. For a * self-adjoint matrix, the operator norm is the largest eigenvalue. * * The current implementation uses the eigenvalues of the matrix, as computed * by eigenvalues(), to compute the operator norm of the matrix. * * Example: \include SelfAdjointView_operatorNorm.cpp * Output: \verbinclude SelfAdjointView_operatorNorm.out * * \sa eigenvalues(), MatrixBase::operatorNorm() */ template<typename MatrixType, unsigned int UpLo> inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar SelfAdjointView<MatrixType, UpLo>::operatorNorm() const { return eigenvalues().cwiseAbs().maxCoeff(); } } // end namespace Eigen #endif
5,679
34.279503
96
h
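A short sketch of the convenience accessors declared in the file above (MatrixBase::eigenvalues(), MatrixBase::operatorNorm() and their SelfAdjointView counterparts); the 4x4 random matrix is illustrative only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  MatrixXd M = MatrixXd::Random(4, 4);

  // General matrix: complex eigenvalues (EigenSolver under the hood),
  // operator norm = largest singular value.
  std::cout << "eigenvalues:\n" << M.eigenvalues() << "\n";
  std::cout << "operator norm: " << M.operatorNorm() << "\n";

  // Selfadjoint view: real eigenvalues via SelfAdjointEigenSolver,
  // operator norm = largest absolute eigenvalue.
  MatrixXd S = M + M.transpose();
  std::cout << S.selfadjointView<Lower>().eigenvalues().transpose() << "\n";
  std::cout << S.selfadjointView<Lower>().operatorNorm() << "\n";
}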
abess
abess-master/python/include/Eigen/src/Eigenvalues/RealQZ.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Alexey Korepanov <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_REAL_QZ_H #define EIGEN_REAL_QZ_H namespace Eigen { /** \eigenvalues_module \ingroup Eigenvalues_Module * * * \class RealQZ * * \brief Performs a real QZ decomposition of a pair of square matrices * * \tparam _MatrixType the type of the matrix of which we are computing the * real QZ decomposition; this is expected to be an instantiation of the * Matrix class template. * * Given a real square matrices A and B, this class computes the real QZ * decomposition: \f$ A = Q S Z \f$, \f$ B = Q T Z \f$ where Q and Z are * real orthogonal matrixes, T is upper-triangular matrix, and S is upper * quasi-triangular matrix. An orthogonal matrix is a matrix whose * inverse is equal to its transpose, \f$ U^{-1} = U^T \f$. A quasi-triangular * matrix is a block-triangular matrix whose diagonal consists of 1-by-1 * blocks and 2-by-2 blocks where further reduction is impossible due to * complex eigenvalues. * * The eigenvalues of the pencil \f$ A - z B \f$ can be obtained from * 1x1 and 2x2 blocks on the diagonals of S and T. * * Call the function compute() to compute the real QZ decomposition of a * given pair of matrices. Alternatively, you can use the * RealQZ(const MatrixType& B, const MatrixType& B, bool computeQZ) * constructor which computes the real QZ decomposition at construction * time. Once the decomposition is computed, you can use the matrixS(), * matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices * S, T, Q and Z in the decomposition. If computeQZ==false, some time * is saved by not computing matrices Q and Z. * * Example: \include RealQZ_compute.cpp * Output: \include RealQZ_compute.out * * \note The implementation is based on the algorithm in "Matrix Computations" * by Gene H. Golub and Charles F. Van Loan, and a paper "An algorithm for * generalized eigenvalue problems" by C.B.Moler and G.W.Stewart. * * \sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver */ template<typename _MatrixType> class RealQZ { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, Options = MatrixType::Options, MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; typedef typename MatrixType::Scalar Scalar; typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType; typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType; /** \brief Default constructor. * * \param [in] size Positive integer, size of the matrix whose QZ decomposition will be computed. * * The default constructor is useful in cases in which the user intends to * perform decompositions via compute(). The \p size parameter is only * used as a hint. It is not an error to give a wrong \p size, but it may * impair performance. * * \sa compute() for an example. */ explicit RealQZ(Index size = RowsAtCompileTime==Dynamic ? 
1 : RowsAtCompileTime) : m_S(size, size), m_T(size, size), m_Q(size, size), m_Z(size, size), m_workspace(size*2), m_maxIters(400), m_isInitialized(false) { } /** \brief Constructor; computes real QZ decomposition of given matrices * * \param[in] A Matrix A. * \param[in] B Matrix B. * \param[in] computeQZ If false, A and Z are not computed. * * This constructor calls compute() to compute the QZ decomposition. */ RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) : m_S(A.rows(),A.cols()), m_T(A.rows(),A.cols()), m_Q(A.rows(),A.cols()), m_Z(A.rows(),A.cols()), m_workspace(A.rows()*2), m_maxIters(400), m_isInitialized(false) { compute(A, B, computeQZ); } /** \brief Returns matrix Q in the QZ decomposition. * * \returns A const reference to the matrix Q. */ const MatrixType& matrixQ() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition."); return m_Q; } /** \brief Returns matrix Z in the QZ decomposition. * * \returns A const reference to the matrix Z. */ const MatrixType& matrixZ() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); eigen_assert(m_computeQZ && "The matrices Q and Z have not been computed during the QZ decomposition."); return m_Z; } /** \brief Returns matrix S in the QZ decomposition. * * \returns A const reference to the matrix S. */ const MatrixType& matrixS() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); return m_S; } /** \brief Returns matrix S in the QZ decomposition. * * \returns A const reference to the matrix S. */ const MatrixType& matrixT() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); return m_T; } /** \brief Computes QZ decomposition of given matrix. * * \param[in] A Matrix A. * \param[in] B Matrix B. * \param[in] computeQZ If false, A and Z are not computed. * \returns Reference to \c *this */ RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true); /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, \c NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); return m_info; } /** \brief Returns number of performed QR-like iterations. */ Index iterations() const { eigen_assert(m_isInitialized && "RealQZ is not initialized."); return m_global_iter; } /** Sets the maximal number of iterations allowed to converge to one eigenvalue * or decouple the problem. 
*/ RealQZ& setMaxIterations(Index maxIters) { m_maxIters = maxIters; return *this; } private: MatrixType m_S, m_T, m_Q, m_Z; Matrix<Scalar,Dynamic,1> m_workspace; ComputationInfo m_info; Index m_maxIters; bool m_isInitialized; bool m_computeQZ; Scalar m_normOfT, m_normOfS; Index m_global_iter; typedef Matrix<Scalar,3,1> Vector3s; typedef Matrix<Scalar,2,1> Vector2s; typedef Matrix<Scalar,2,2> Matrix2s; typedef JacobiRotation<Scalar> JRs; void hessenbergTriangular(); void computeNorms(); Index findSmallSubdiagEntry(Index iu); Index findSmallDiagEntry(Index f, Index l); void splitOffTwoRows(Index i); void pushDownZero(Index z, Index f, Index l); void step(Index f, Index l, Index iter); }; // RealQZ /** \internal Reduces S and T to upper Hessenberg - triangular form */ template<typename MatrixType> void RealQZ<MatrixType>::hessenbergTriangular() { const Index dim = m_S.cols(); // perform QR decomposition of T, overwrite T with R, save Q HouseholderQR<MatrixType> qrT(m_T); m_T = qrT.matrixQR(); m_T.template triangularView<StrictlyLower>().setZero(); m_Q = qrT.householderQ(); // overwrite S with Q* S m_S.applyOnTheLeft(m_Q.adjoint()); // init Z as Identity if (m_computeQZ) m_Z = MatrixType::Identity(dim,dim); // reduce S to upper Hessenberg with Givens rotations for (Index j=0; j<=dim-3; j++) { for (Index i=dim-1; i>=j+2; i--) { JRs G; // kill S(i,j) if(m_S.coeff(i,j) != 0) { G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j)); m_S.coeffRef(i,j) = Scalar(0.0); m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint()); m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint()); // update Q if (m_computeQZ) m_Q.applyOnTheRight(i-1,i,G); } // kill T(i,i-1) if(m_T.coeff(i,i-1)!=Scalar(0)) { G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i)); m_T.coeffRef(i,i-1) = Scalar(0.0); m_S.applyOnTheRight(i,i-1,G); m_T.topRows(i).applyOnTheRight(i,i-1,G); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(i,i-1,G.adjoint()); } } } } /** \internal Computes vector L1 norms of S and T when in Hessenberg-Triangular form already */ template<typename MatrixType> inline void RealQZ<MatrixType>::computeNorms() { const Index size = m_S.cols(); m_normOfS = Scalar(0.0); m_normOfT = Scalar(0.0); for (Index j = 0; j < size; ++j) { m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum(); m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum(); } } /** \internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */ template<typename MatrixType> inline Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu) { using std::abs; Index res = iu; while (res > 0) { Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res)); if (s == Scalar(0.0)) s = m_normOfS; if (abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s) break; res--; } return res; } /** \internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1) */ template<typename MatrixType> inline Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l) { using std::abs; Index res = l; while (res >= f) { if (abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT) break; res--; } return res; } /** \internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */ template<typename MatrixType> inline void RealQZ<MatrixType>::splitOffTwoRows(Index i) { using std::abs; using std::sqrt; const Index dim=m_S.cols(); if (abs(m_S.coeff(i+1,i))==Scalar(0)) return; Index j = findSmallDiagEntry(i,i+1); if (j==i-1) { 
// block of (S T^{-1}) Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>(). template solve<OnTheRight>(m_S.template block<2,2>(i,i)); Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1)); Scalar q = p*p + STi(1,0)*STi(0,1); if (q>=0) { Scalar z = sqrt(q); // one QR-like iteration for ABi - lambda I // is enough - when we know exact eigenvalue in advance, // convergence is immediate JRs G; if (p>=0) G.makeGivens(p + z, STi(1,0)); else G.makeGivens(p - z, STi(1,0)); m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint()); m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint()); // update Q if (m_computeQZ) m_Q.applyOnTheRight(i,i+1,G); G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i)); m_S.topRows(i+2).applyOnTheRight(i+1,i,G); m_T.topRows(i+2).applyOnTheRight(i+1,i,G); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(i+1,i,G.adjoint()); m_S.coeffRef(i+1,i) = Scalar(0.0); m_T.coeffRef(i+1,i) = Scalar(0.0); } } else { pushDownZero(j,i,i+1); } } /** \internal use zero in T(z,z) to zero S(l,l-1), working in block f..l */ template<typename MatrixType> inline void RealQZ<MatrixType>::pushDownZero(Index z, Index f, Index l) { JRs G; const Index dim = m_S.cols(); for (Index zz=z; zz<l; zz++) { // push 0 down Index firstColS = zz>f ? (zz-1) : zz; G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1)); m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint()); m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint()); m_T.coeffRef(zz+1,zz+1) = Scalar(0.0); // update Q if (m_computeQZ) m_Q.applyOnTheRight(zz,zz+1,G); // kill S(zz+1, zz-1) if (zz>f) { G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1)); m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G); m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G); m_S.coeffRef(zz+1,zz-1) = Scalar(0.0); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(zz,zz-1,G.adjoint()); } } // finally kill S(l,l-1) G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1)); m_S.applyOnTheRight(l,l-1,G); m_T.applyOnTheRight(l,l-1,G); m_S.coeffRef(l,l-1)=Scalar(0.0); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(l,l-1,G.adjoint()); } /** \internal QR-like iterative step for block f..l */ template<typename MatrixType> inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter) { using std::abs; const Index dim = m_S.cols(); // x, y, z Scalar x, y, z; if (iter==10) { // Wilkinson ad hoc shift const Scalar a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1), a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1), b12=m_T.coeff(f+0,f+1), b11i=Scalar(1.0)/m_T.coeff(f+0,f+0), b22i=Scalar(1.0)/m_T.coeff(f+1,f+1), a87=m_S.coeff(l-1,l-2), a98=m_S.coeff(l-0,l-1), b77i=Scalar(1.0)/m_T.coeff(l-2,l-2), b88i=Scalar(1.0)/m_T.coeff(l-1,l-1); Scalar ss = abs(a87*b77i) + abs(a98*b88i), lpl = Scalar(1.5)*ss, ll = ss*ss; x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i - a11*a21*b12*b11i*b11i*b22i; y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i - a21*a21*b12*b11i*b11i*b22i; z = a21*a32*b11i*b22i; } else if (iter==16) { // another exceptional shift x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) / (m_T.coeff(l-1,l-1)*m_T.coeff(l,l)); y = m_S.coeff(f+1,f)/m_T.coeff(f,f); z = 0; } else if (iter>23 && !(iter%8)) { // extremely exceptional shift x = internal::random<Scalar>(-1.0,1.0); y = internal::random<Scalar>(-1.0,1.0); z = internal::random<Scalar>(-1.0,1.0); } else { // Compute the shifts: (x,y,z,0...) 
= (AB^-1 - l1 I) (AB^-1 - l2 I) e1 // where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1 where // U and V are 2x2 bottom right sub matrices of A and B. Thus: // = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1) // = AB^-1AB^-1 + det(M) - tr(M)(AB^-1) // Since we are only interested in having x, y, z with a correct ratio, we have: const Scalar a11 = m_S.coeff(f,f), a12 = m_S.coeff(f,f+1), a21 = m_S.coeff(f+1,f), a22 = m_S.coeff(f+1,f+1), a32 = m_S.coeff(f+2,f+1), a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l), a98 = m_S.coeff(l,l-1), a99 = m_S.coeff(l,l), b11 = m_T.coeff(f,f), b12 = m_T.coeff(f,f+1), b22 = m_T.coeff(f+1,f+1), b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l), b99 = m_T.coeff(l,l); x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21) + a12/b22 - (a11/b11)*(b12/b22); y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99); z = a32/b22; } JRs G; for (Index k=f; k<=l-2; k++) { // variables for Householder reflections Vector2s essential2; Scalar tau, beta; Vector3s hr(x,y,z); // Q_k to annihilate S(k+1,k-1) and S(k+2,k-1) hr.makeHouseholderInPlace(tau, beta); essential2 = hr.template bottomRows<2>(); Index fc=(std::max)(k-1,Index(0)); // first col to update m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data()); m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data()); if (m_computeQZ) m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data()); if (k>f) m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0); // Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k) hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1); hr.makeHouseholderInPlace(tau, beta); essential2 = hr.template bottomRows<2>(); { Index lr = (std::min)(k+4,dim); // last row to update Map<Matrix<Scalar,Dynamic,1> > tmp(m_workspace.data(),lr); // S tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2; tmp += m_S.col(k+2).head(lr); m_S.col(k+2).head(lr) -= tau*tmp; m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint(); // T tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2; tmp += m_T.col(k+2).head(lr); m_T.col(k+2).head(lr) -= tau*tmp; m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint(); } if (m_computeQZ) { // Z Map<Matrix<Scalar,1,Dynamic> > tmp(m_workspace.data(),dim); tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k)); tmp += m_Z.row(k+2); m_Z.row(k+2) -= tau*tmp; m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp); } m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0); // Z_{k2} to annihilate T(k+1,k) G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k)); m_S.applyOnTheRight(k+1,k,G); m_T.applyOnTheRight(k+1,k,G); // update Z if (m_computeQZ) m_Z.applyOnTheLeft(k+1,k,G.adjoint()); m_T.coeffRef(k+1,k) = Scalar(0.0); // update x,y,z x = m_S.coeff(k+1,k); y = m_S.coeff(k+2,k); if (k < l-2) z = m_S.coeff(k+3,k); } // loop over k // Q_{n-1} to annihilate y = S(l,l-2) G.makeGivens(x,y); m_S.applyOnTheLeft(l-1,l,G.adjoint()); m_T.applyOnTheLeft(l-1,l,G.adjoint()); if (m_computeQZ) m_Q.applyOnTheRight(l-1,l,G); m_S.coeffRef(l,l-2) = Scalar(0.0); // Z_{n-1} to annihilate T(l,l-1) G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1)); m_S.applyOnTheRight(l,l-1,G); m_T.applyOnTheRight(l,l-1,G); if (m_computeQZ) m_Z.applyOnTheLeft(l,l-1,G.adjoint()); m_T.coeffRef(l,l-1) = Scalar(0.0); } 
template<typename MatrixType> RealQZ<MatrixType>& RealQZ<MatrixType>::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ) { const Index dim = A_in.cols(); eigen_assert (A_in.rows()==dim && A_in.cols()==dim && B_in.rows()==dim && B_in.cols()==dim && "Need square matrices of the same dimension"); m_isInitialized = true; m_computeQZ = computeQZ; m_S = A_in; m_T = B_in; m_workspace.resize(dim*2); m_global_iter = 0; // entrance point: hessenberg triangular decomposition hessenbergTriangular(); // compute L1 vector norms of T, S into m_normOfS, m_normOfT computeNorms(); Index l = dim-1, f, local_iter = 0; while (l>0 && local_iter<m_maxIters) { f = findSmallSubdiagEntry(l); // now rows and columns f..l (including) decouple from the rest of the problem if (f>0) m_S.coeffRef(f,f-1) = Scalar(0.0); if (f == l) // One root found { l--; local_iter = 0; } else if (f == l-1) // Two roots found { splitOffTwoRows(f); l -= 2; local_iter = 0; } else // No convergence yet { // if there's zero on diagonal of T, we can isolate an eigenvalue with Givens rotations Index z = findSmallDiagEntry(f,l); if (z>=f) { // zero found pushDownZero(z,f,l); } else { // We are sure now that S.block(f,f, l-f+1,l-f+1) is underuced upper-Hessenberg // and T.block(f,f, l-f+1,l-f+1) is invertible uper-triangular, which allows to // apply a QR-like iteration to rows and columns f..l. step(f,l, local_iter); local_iter++; m_global_iter++; } } } // check if we converged before reaching iterations limit m_info = (local_iter<m_maxIters) ? Success : NoConvergence; // For each non triangular 2x2 diagonal block of S, // reduce the respective 2x2 diagonal block of T to positive diagonal form using 2x2 SVD. // This step is not mandatory for QZ, but it does help further extraction of eigenvalues/eigenvectors, // and is in par with Lapack/Matlab QZ. if(m_info==Success) { for(Index i=0; i<dim-1; ++i) { if(m_S.coeff(i+1, i) != Scalar(0)) { JacobiRotation<Scalar> j_left, j_right; internal::real_2x2_jacobi_svd(m_T, i, i+1, &j_left, &j_right); // Apply resulting Jacobi rotations m_S.applyOnTheLeft(i,i+1,j_left); m_S.applyOnTheRight(i,i+1,j_right); m_T.applyOnTheLeft(i,i+1,j_left); m_T.applyOnTheRight(i,i+1,j_right); m_T(i+1,i) = m_T(i,i+1) = Scalar(0); if(m_computeQZ) { m_Q.applyOnTheRight(i,i+1,j_left.transpose()); m_Z.applyOnTheLeft(i,i+1,j_right.transpose()); } i++; } } } return *this; } // end compute } // end namespace Eigen #endif //EIGEN_REAL_QZ
23,586
35.010687
121
h
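A usage sketch for the RealQZ class above, checking the reconstructions A = Q S Z and B = Q T Z documented there; the 5x5 random pair is illustrative only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  const int n = 5;
  MatrixXd A = MatrixXd::Random(n, n);
  MatrixXd B = MatrixXd::Random(n, n);

  RealQZ<MatrixXd> qz(A, B);               // computes S, T, Q and Z
  if (qz.info() != Success) return 1;

  std::cout << "|A - Q S Z| = " << (A - qz.matrixQ() * qz.matrixS() * qz.matrixZ()).norm() << "\n";
  std::cout << "|B - Q T Z| = " << (B - qz.matrixQ() * qz.matrixT() * qz.matrixZ()).norm() << "\n";
  std::cout << "QR-like iterations: " << qz.iterations() << "\n";
}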
abess
abess-master/python/include/Eigen/src/Geometry/EulerAngles.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EULERANGLES_H #define EIGEN_EULERANGLES_H namespace Eigen { /** \geometry_module \ingroup Geometry_Module * * * \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2) * * Each of the three parameters \a a0,\a a1,\a a2 represents the respective rotation axis as an integer in {0,1,2}. * For instance, in: * \code Vector3f ea = mat.eulerAngles(2, 0, 2); \endcode * "2" represents the z axis and "0" the x axis, etc. The returned angles are such that * we have the following equality: * \code * mat == AngleAxisf(ea[0], Vector3f::UnitZ()) * * AngleAxisf(ea[1], Vector3f::UnitX()) * * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode * This corresponds to the right-multiply conventions (with right hand side frames). * * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi]. * * \sa class AngleAxis */ template<typename Derived> EIGEN_DEVICE_FUNC inline Matrix<typename MatrixBase<Derived>::Scalar,3,1> MatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const { EIGEN_USING_STD_MATH(atan2) EIGEN_USING_STD_MATH(sin) EIGEN_USING_STD_MATH(cos) /* Implemented from Graphics Gems IV */ EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3) Matrix<Scalar,3,1> res; typedef Matrix<typename Derived::Scalar,2,1> Vector2; const Index odd = ((a0+1)%3 == a1) ? 0 : 1; const Index i = a0; const Index j = (a0 + 1 + odd)%3; const Index k = (a0 + 2 - odd)%3; if (a0==a2) { res[0] = atan2(coeff(j,i), coeff(k,i)); if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) { if(res[0] > Scalar(0)) { res[0] -= Scalar(EIGEN_PI); } else { res[0] += Scalar(EIGEN_PI); } Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm(); res[1] = -atan2(s2, coeff(i,i)); } else { Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm(); res[1] = atan2(s2, coeff(i,i)); } // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles, // we can compute their respective rotation, and apply its inverse to M. Since the result must // be a rotation around x, we have: // // c2 s1.s2 c1.s2 1 0 0 // 0 c1 -s1 * M = 0 c3 s3 // -s2 s1.c2 c1.c2 0 -s3 c3 // // Thus: m11.c1 - m21.s1 = c3 & m12.c1 - m22.s1 = s3 Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j)); } else { res[0] = atan2(coeff(j,k), coeff(k,k)); Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm(); if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) { if(res[0] > Scalar(0)) { res[0] -= Scalar(EIGEN_PI); } else { res[0] += Scalar(EIGEN_PI); } res[1] = atan2(-coeff(i,k), -c2); } else res[1] = atan2(-coeff(i,k), c2); Scalar s1 = sin(res[0]); Scalar c1 = cos(res[0]); res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j)); } if (!odd) res = -res; return res; } } // end namespace Eigen #endif // EIGEN_EULERANGLES_H
3,639
30.652174
125
h
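A round-trip sketch for MatrixBase::eulerAngles() with the (2,0,2) convention used in the documentation above; the three input angles are arbitrary illustrative values.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  Matrix3f R;
  R = AngleAxisf(0.5f, Vector3f::UnitZ())
    * AngleAxisf(1.2f, Vector3f::UnitX())
    * AngleAxisf(-0.3f, Vector3f::UnitZ());

  Vector3f ea = R.eulerAngles(2, 0, 2);    // z-x-z convention

  Matrix3f R2;
  R2 = AngleAxisf(ea[0], Vector3f::UnitZ())
     * AngleAxisf(ea[1], Vector3f::UnitX())
     * AngleAxisf(ea[2], Vector3f::UnitZ());

  std::cout << "angles: " << ea.transpose() << "\n";
  std::cout << "reconstruction error: " << (R - R2).norm() << "\n";
}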
abess
abess-master/python/include/Eigen/src/Geometry/Umeyama.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Hauke Heibel <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_UMEYAMA_H #define EIGEN_UMEYAMA_H // This file requires the user to include // * Eigen/Core // * Eigen/LU // * Eigen/SVD // * Eigen/Array namespace Eigen { #ifndef EIGEN_PARSED_BY_DOXYGEN // These helpers are required since it allows to use mixed types as parameters // for the Umeyama. The problem with mixed parameters is that the return type // cannot trivially be deduced when float and double types are mixed. namespace internal { // Compile time return type deduction for different MatrixBase types. // Different means here different alignment and parameters but the same underlying // real scalar type. template<typename MatrixType, typename OtherMatrixType> struct umeyama_transform_matrix_type { enum { MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime), // When possible we want to choose some small fixed size value since the result // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want. HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1 }; typedef Matrix<typename traits<MatrixType>::Scalar, HomogeneousDimension, HomogeneousDimension, AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? RowMajor : ColMajor), HomogeneousDimension, HomogeneousDimension > type; }; } #endif /** * \geometry_module \ingroup Geometry_Module * * \brief Returns the transformation between two point sets. * * The algorithm is based on: * "Least-squares estimation of transformation parameters between two point patterns", * Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573 * * It estimates parameters \f$ c, \mathbf{R}, \f$ and \f$ \mathbf{t} \f$ such that * \f{align*} * \frac{1}{n} \sum_{i=1}^n \vert\vert y_i - (c\mathbf{R}x_i + \mathbf{t}) \vert\vert_2^2 * \f} * is minimized. * * The algorithm is based on the analysis of the covariance matrix * \f$ \Sigma_{\mathbf{x}\mathbf{y}} \in \mathbb{R}^{d \times d} \f$ * of the input point sets \f$ \mathbf{x} \f$ and \f$ \mathbf{y} \f$ where * \f$d\f$ is corresponding to the dimension (which is typically small). * The analysis is involving the SVD having a complexity of \f$O(d^3)\f$ * though the actual computational effort lies in the covariance * matrix computation which has an asymptotic lower bound of \f$O(dm)\f$ when * the input point sets have dimension \f$d \times m\f$. * * Currently the method is working only for floating point matrices. * * \todo Should the return type of umeyama() become a Transform? * * \param src Source points \f$ \mathbf{x} = \left( x_1, \hdots, x_n \right) \f$. * \param dst Destination points \f$ \mathbf{y} = \left( y_1, \hdots, y_n \right) \f$. * \param with_scaling Sets \f$ c=1 \f$ when <code>false</code> is passed. * \return The homogeneous transformation * \f{align*} * T = \begin{bmatrix} c\mathbf{R} & \mathbf{t} \\ \mathbf{0} & 1 \end{bmatrix} * \f} * minimizing the resudiual above. This transformation is always returned as an * Eigen::Matrix. 
*/ template <typename Derived, typename OtherDerived> typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true) { typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType; typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) }; typedef Matrix<Scalar, Dimension, 1> VectorType; typedef Matrix<Scalar, Dimension, Dimension> MatrixType; typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType; const Index m = src.rows(); // dimension const Index n = src.cols(); // number of measurements // required for demeaning ... const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n); // computation of mean const VectorType src_mean = src.rowwise().sum() * one_over_n; const VectorType dst_mean = dst.rowwise().sum() * one_over_n; // demeaning of src and dst points const RowMajorMatrixType src_demean = src.colwise() - src_mean; const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean; // Eq. (36)-(37) const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n; // Eq. (38) const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose(); JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV); // Initialize the resulting transformation with an identity matrix... TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1); // Eq. (39) VectorType S = VectorType::Ones(m); if ( svd.matrixU().determinant() * svd.matrixV().determinant() < 0 ) S(m-1) = -1; // Eq. (40) and (43) Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose(); if (with_scaling) { // Eq. (42) const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S); // Eq. (41) Rt.col(m).head(m) = dst_mean; Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean; Rt.block(0,0,m,m) *= c; } else { Rt.col(m).head(m) = dst_mean; Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean; } return Rt; } } // end namespace Eigen #endif // EIGEN_UMEYAMA_H
6,191
36.077844
124
h
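A sketch of Eigen::umeyama() recovering a known similarity transform; the scale, rotation, translation and the 20 random source points are illustrative values, not taken from the sources above.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  const int n = 20;
  Matrix3Xd src = Matrix3Xd::Random(3, n);

  // Ground-truth similarity: y_i = c R x_i + t.
  const double   c = 1.7;
  const Matrix3d R = AngleAxisd(0.4, Vector3d::UnitY()).toRotationMatrix();
  const Vector3d t(0.5, -1.0, 2.0);
  Matrix3Xd dst = (c * R * src).colwise() + t;

  // 4x4 homogeneous transformation [cR t; 0 1] minimizing the least-squares residual.
  Matrix4d T = umeyama(src, dst, true);
  std::cout << T << "\n";
}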
abess
abess-master/python/include/Eigen/src/Geometry/arch/Geometry_SSE.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Rohit Garg <[email protected]> // Copyright (C) 2009-2010 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GEOMETRY_SSE_H #define EIGEN_GEOMETRY_SSE_H namespace Eigen { namespace internal { template<class Derived, class OtherDerived> struct quat_product<Architecture::SSE, Derived, OtherDerived, float> { enum { AAlignment = traits<Derived>::Alignment, BAlignment = traits<OtherDerived>::Alignment, ResAlignment = traits<Quaternion<float> >::Alignment }; static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b) { Quaternion<float> res; const __m128 mask = _mm_setr_ps(0.f,0.f,0.f,-0.f); __m128 a = _a.coeffs().template packet<AAlignment>(0); __m128 b = _b.coeffs().template packet<BAlignment>(0); __m128 s1 = _mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2)); __m128 s2 = _mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1)); pstoret<float,Packet4f,ResAlignment>( &res.x(), _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)), _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0), vec4f_swizzle1(b,1,2,0,0))), _mm_xor_ps(mask,_mm_add_ps(s1,s2)))); return res; } }; template<class Derived> struct quat_conj<Architecture::SSE, Derived, float> { enum { ResAlignment = traits<Quaternion<float> >::Alignment }; static inline Quaternion<float> run(const QuaternionBase<Derived>& q) { Quaternion<float> res; const __m128 mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f); pstoret<float,Packet4f,ResAlignment>(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet<traits<Derived>::Alignment>(0))); return res; } }; template<typename VectorLhs,typename VectorRhs> struct cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true> { enum { ResAlignment = traits<typename plain_matrix_type<VectorLhs>::type>::Alignment }; static inline typename plain_matrix_type<VectorLhs>::type run(const VectorLhs& lhs, const VectorRhs& rhs) { __m128 a = lhs.template packet<traits<VectorLhs>::Alignment>(0); __m128 b = rhs.template packet<traits<VectorRhs>::Alignment>(0); __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3)); __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3)); typename plain_matrix_type<VectorLhs>::type res; pstoret<float,Packet4f,ResAlignment>(&res.x(),_mm_sub_ps(mul1,mul2)); return res; } }; template<class Derived, class OtherDerived> struct quat_product<Architecture::SSE, Derived, OtherDerived, double> { enum { BAlignment = traits<OtherDerived>::Alignment, ResAlignment = traits<Quaternion<double> >::Alignment }; static inline Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b) { const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0)); Quaternion<double> res; const double* a = _a.coeffs().data(); Packet2d b_xy = _b.coeffs().template packet<BAlignment>(0); Packet2d b_zw = _b.coeffs().template packet<BAlignment>(2); Packet2d a_xx = pset1<Packet2d>(a[0]); Packet2d a_yy = pset1<Packet2d>(a[1]); Packet2d a_zz = pset1<Packet2d>(a[2]); Packet2d a_ww = pset1<Packet2d>(a[3]); // two temporaries: Packet2d t1, t2; /* * t1 = ww*xy + yy*zw * t2 = zz*xy - xx*zw * res.xy = t1 +/- swap(t2) */ t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw)); t2 = 
psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw)); #ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_addsub_pd(t1, preverse(t2))); #else pstoret<double,Packet2d,ResAlignment>(&res.x(), padd(t1, pxor(mask,preverse(t2)))); #endif /* * t1 = ww*zw - yy*xy * t2 = zz*zw + xx*xy * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2) */ t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy)); t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy)); #ifdef EIGEN_VECTORIZE_SSE3 EIGEN_UNUSED_VARIABLE(mask) pstoret<double,Packet2d,ResAlignment>(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2))); #else pstoret<double,Packet2d,ResAlignment>(&res.z(), psub(t1, pxor(mask,preverse(t2)))); #endif return res; } }; template<class Derived> struct quat_conj<Architecture::SSE, Derived, double> { enum { ResAlignment = traits<Quaternion<double> >::Alignment }; static inline Quaternion<double> run(const QuaternionBase<Derived>& q) { Quaternion<double> res; const __m128d mask0 = _mm_setr_pd(-0.,-0.); const __m128d mask2 = _mm_setr_pd(-0.,0.); pstoret<double,Packet2d,ResAlignment>(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<traits<Derived>::Alignment>(0))); pstoret<double,Packet2d,ResAlignment>(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<traits<Derived>::Alignment>(2))); return res; } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GEOMETRY_SSE_H
5,387
32.259259
130
h
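The kernels above are internal; they are reached through the ordinary public API. Below is a sketch of the calls that dispatch to them when SSE is enabled (quaternion product and conjugate, and cross3() on size-4 vectors); the input values are illustrative.

#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  Quaternionf a(AngleAxisf(0.3f, Vector3f::UnitX()));
  Quaternionf b(AngleAxisf(-1.1f, Vector3f::UnitZ()));

  Quaternionf ab = a * b;           // quat_product
  Quaternionf ac = a.conjugate();   // quat_conj
  std::cout << ab.coeffs().transpose() << "\n" << ac.coeffs().transpose() << "\n";

  Vector4f u = Vector4f::Random(), v = Vector4f::Random();
  u.w() = v.w() = 0.f;              // cross3() ignores the last coefficient
  std::cout << u.cross3(v).transpose() << "\n";
}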
abess
abess-master/python/include/Eigen/src/Householder/Householder.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Benoit Jacob <[email protected]> // Copyright (C) 2009 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_HOUSEHOLDER_H #define EIGEN_HOUSEHOLDER_H namespace Eigen { namespace internal { template<int n> struct decrement_size { enum { ret = n==Dynamic ? n : n-1 }; }; } /** Computes the elementary reflector H such that: * \f$ H *this = [ beta 0 ... 0]^T \f$ * where the transformation H is: * \f$ H = I - tau v v^*\f$ * and the vector v is: * \f$ v^T = [1 essential^T] \f$ * * The essential part of the vector \c v is stored in *this. * * On output: * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(), * MatrixBase::applyHouseholderOnTheRight() */ template<typename Derived> void MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta) { VectorBlock<Derived, internal::decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1); makeHouseholder(essentialPart, tau, beta); } /** Computes the elementary reflector H such that: * \f$ H *this = [ beta 0 ... 0]^T \f$ * where the transformation H is: * \f$ H = I - tau v v^*\f$ * and the vector v is: * \f$ v^T = [1 essential^T] \f$ * * On output: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param beta the result of H * \c *this * * \sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(), * MatrixBase::applyHouseholderOnTheRight() */ template<typename Derived> template<typename EssentialPart> void MatrixBase<Derived>::makeHouseholder( EssentialPart& essential, Scalar& tau, RealScalar& beta) const { using std::sqrt; using numext::conj; EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart) VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1); RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm(); Scalar c0 = coeff(0); const RealScalar tol = (std::numeric_limits<RealScalar>::min)(); if(tailSqNorm <= tol && numext::abs2(numext::imag(c0))<=tol) { tau = RealScalar(0); beta = numext::real(c0); essential.setZero(); } else { beta = sqrt(numext::abs2(c0) + tailSqNorm); if (numext::real(c0)>=RealScalar(0)) beta = -beta; essential = tail / (c0 - beta); tau = conj((beta - c0) / beta); } } /** Apply the elementary reflector H given by * \f$ H = I - tau v v^*\f$ * with * \f$ v^T = [1 essential^T] \f$ * from the left to a vector or matrix. 
* * On input: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param workspace a pointer to working space with at least * this->cols() * essential.size() entries * * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), * MatrixBase::applyHouseholderOnTheRight() */ template<typename Derived> template<typename EssentialPart> void MatrixBase<Derived>::applyHouseholderOnTheLeft( const EssentialPart& essential, const Scalar& tau, Scalar* workspace) { if(rows() == 1) { *this *= Scalar(1)-tau; } else if(tau!=Scalar(0)) { Map<typename internal::plain_row_type<PlainObject>::type> tmp(workspace,cols()); Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols()); tmp.noalias() = essential.adjoint() * bottom; tmp += this->row(0); this->row(0) -= tau * tmp; bottom.noalias() -= tau * essential * tmp; } } /** Apply the elementary reflector H given by * \f$ H = I - tau v v^*\f$ * with * \f$ v^T = [1 essential^T] \f$ * from the right to a vector or matrix. * * On input: * \param essential the essential part of the vector \c v * \param tau the scaling factor of the Householder transformation * \param workspace a pointer to working space with at least * this->cols() * essential.size() entries * * \sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), * MatrixBase::applyHouseholderOnTheLeft() */ template<typename Derived> template<typename EssentialPart> void MatrixBase<Derived>::applyHouseholderOnTheRight( const EssentialPart& essential, const Scalar& tau, Scalar* workspace) { if(cols() == 1) { *this *= Scalar(1)-tau; } else if(tau!=Scalar(0)) { Map<typename internal::plain_col_type<PlainObject>::type> tmp(workspace,rows()); Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1); tmp.noalias() = right * essential.conjugate(); tmp += this->col(0); this->col(0) -= tau * tmp; right.noalias() -= tau * tmp * essential.transpose(); } } } // end namespace Eigen #endif // EIGEN_HOUSEHOLDER_H
5,345
29.901734
123
h
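A sketch of the low-level reflector helpers above: build a Householder vector from the first column of a matrix and apply it from the left, which zeroes that column below the diagonal. The sizes and the random matrix are illustrative.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  using namespace Eigen;
  const int m = 5, n = 4;
  MatrixXd A = MatrixXd::Random(m, n);

  VectorXd essential(m - 1);
  double tau, beta;
  A.col(0).makeHouseholder(essential, tau, beta);   // H * A.col(0) = [beta 0 ... 0]^T

  VectorXd workspace(n);                            // at least A.cols() entries
  A.applyHouseholderOnTheLeft(essential, tau, workspace.data());

  std::cout << "A(0,0) - beta = " << A(0, 0) - beta << "\n";
  std::cout << "norm below the diagonal in column 0: " << A.col(0).tail(m - 1).norm() << "\n";
}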
abess
abess-master/python/include/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2014 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BASIC_PRECONDITIONERS_H #define EIGEN_BASIC_PRECONDITIONERS_H namespace Eigen { /** \ingroup IterativeLinearSolvers_Module * \brief A preconditioner based on the digonal entries * * This class allows to approximately solve for A.x = b problems assuming A is a diagonal matrix. * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: \code A.diagonal().asDiagonal() . x = b \endcode * * \tparam _Scalar the type of the scalar. * * \implsparsesolverconcept * * This preconditioner is suitable for both selfadjoint and general problems. * The diagonal entries are pre-inverted and stored into a dense vector. * * \note A variant that has yet to be implemented would attempt to preserve the norm of each column. * * \sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient */ template <typename _Scalar> class DiagonalPreconditioner { typedef _Scalar Scalar; typedef Matrix<Scalar,Dynamic,1> Vector; public: typedef typename Vector::StorageIndex StorageIndex; enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic }; DiagonalPreconditioner() : m_isInitialized(false) {} template<typename MatType> explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols()) { compute(mat); } Index rows() const { return m_invdiag.size(); } Index cols() const { return m_invdiag.size(); } template<typename MatType> DiagonalPreconditioner& analyzePattern(const MatType& ) { return *this; } template<typename MatType> DiagonalPreconditioner& factorize(const MatType& mat) { m_invdiag.resize(mat.cols()); for(int j=0; j<mat.outerSize(); ++j) { typename MatType::InnerIterator it(mat,j); while(it && it.index()!=j) ++it; if(it && it.index()==j && it.value()!=Scalar(0)) m_invdiag(j) = Scalar(1)/it.value(); else m_invdiag(j) = Scalar(1); } m_isInitialized = true; return *this; } template<typename MatType> DiagonalPreconditioner& compute(const MatType& mat) { return factorize(mat); } /** \internal */ template<typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { x = m_invdiag.array() * b.array() ; } template<typename Rhs> inline const Solve<DiagonalPreconditioner, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "DiagonalPreconditioner is not initialized."); eigen_assert(m_invdiag.size()==b.rows() && "DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b"); return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived()); } ComputationInfo info() { return Success; } protected: Vector m_invdiag; bool m_isInitialized; }; /** \ingroup IterativeLinearSolvers_Module * \brief Jacobi preconditioner for LeastSquaresConjugateGradient * * This class allows to approximately solve for A' A x = A' b problems assuming A' A is a diagonal matrix. * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for: \code (A.adjoint() * A).diagonal().asDiagonal() * x = b \endcode * * \tparam _Scalar the type of the scalar. * * \implsparsesolverconcept * * The diagonal entries are pre-inverted and stored into a dense vector. 
* * \sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner */ template <typename _Scalar> class LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar> { typedef _Scalar Scalar; typedef typename NumTraits<Scalar>::Real RealScalar; typedef DiagonalPreconditioner<_Scalar> Base; using Base::m_invdiag; public: LeastSquareDiagonalPreconditioner() : Base() {} template<typename MatType> explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base() { compute(mat); } template<typename MatType> LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& ) { return *this; } template<typename MatType> LeastSquareDiagonalPreconditioner& factorize(const MatType& mat) { // Compute the inverse squared-norm of each column of mat m_invdiag.resize(mat.cols()); if(MatType::IsRowMajor) { m_invdiag.setZero(); for(Index j=0; j<mat.outerSize(); ++j) { for(typename MatType::InnerIterator it(mat,j); it; ++it) m_invdiag(it.index()) += numext::abs2(it.value()); } for(Index j=0; j<mat.cols(); ++j) if(numext::real(m_invdiag(j))>RealScalar(0)) m_invdiag(j) = RealScalar(1)/numext::real(m_invdiag(j)); } else { for(Index j=0; j<mat.outerSize(); ++j) { RealScalar sum = mat.innerVector(j).squaredNorm(); if(sum>RealScalar(0)) m_invdiag(j) = RealScalar(1)/sum; else m_invdiag(j) = RealScalar(1); } } Base::m_isInitialized = true; return *this; } template<typename MatType> LeastSquareDiagonalPreconditioner& compute(const MatType& mat) { return factorize(mat); } ComputationInfo info() { return Success; } protected: }; /** \ingroup IterativeLinearSolvers_Module * \brief A naive preconditioner which approximates any matrix as the identity matrix * * \implsparsesolverconcept * * \sa class DiagonalPreconditioner */ class IdentityPreconditioner { public: IdentityPreconditioner() {} template<typename MatrixType> explicit IdentityPreconditioner(const MatrixType& ) {} template<typename MatrixType> IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; } template<typename MatrixType> IdentityPreconditioner& factorize(const MatrixType& ) { return *this; } template<typename MatrixType> IdentityPreconditioner& compute(const MatrixType& ) { return *this; } template<typename Rhs> inline const Rhs& solve(const Rhs& b) const { return b; } ComputationInfo info() { return Success; } }; } // end namespace Eigen #endif // EIGEN_BASIC_PRECONDITIONERS_H
6,763
28.797357
111
h
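A sketch showing DiagonalPreconditioner plugged into ConjugateGradient (it is also the default preconditioner there); the tridiagonal SPD test matrix is illustrative.

#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main()
{
  using namespace Eigen;
  const int n = 100;
  std::vector<Triplet<double>> trips;
  for (int i = 0; i < n; ++i) {
    trips.emplace_back(i, i, 2.0);
    if (i + 1 < n) { trips.emplace_back(i, i + 1, -1.0); trips.emplace_back(i + 1, i, -1.0); }
  }
  SparseMatrix<double> A(n, n);
  A.setFromTriplets(trips.begin(), trips.end());   // SPD tridiagonal matrix
  VectorXd b = VectorXd::Ones(n);

  ConjugateGradient<SparseMatrix<double>, Lower | Upper, DiagonalPreconditioner<double>> cg;
  cg.compute(A);
  VectorXd x = cg.solve(b);
  std::cout << "#iterations: " << cg.iterations() << "  estimated error: " << cg.error() << "\n";
}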
abess
abess-master/python/include/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2014 Gael Guennebaud <[email protected]> // Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BICGSTAB_H #define EIGEN_BICGSTAB_H namespace Eigen { namespace internal { /** \internal Low-level bi conjugate gradient stabilized algorithm * \param mat The matrix A * \param rhs The right hand side vector b * \param x On input and initial solution, on output the computed solution. * \param precond A preconditioner being able to efficiently solve for an * approximation of Ax=b (regardless of b) * \param iters On input the max number of iteration, on output the number of performed iterations. * \param tol_error On input the tolerance error, on output an estimation of the relative error. * \return false in the case of numerical issue, for example a break down of BiCGSTAB. */ template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x, const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; using std::abs; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix<Scalar,Dynamic,1> VectorType; RealScalar tol = tol_error; Index maxIters = iters; Index n = mat.cols(); VectorType r = rhs - mat * x; VectorType r0 = r; RealScalar r0_sqnorm = r0.squaredNorm(); RealScalar rhs_sqnorm = rhs.squaredNorm(); if(rhs_sqnorm == 0) { x.setZero(); return true; } Scalar rho = 1; Scalar alpha = 1; Scalar w = 1; VectorType v = VectorType::Zero(n), p = VectorType::Zero(n); VectorType y(n), z(n); VectorType kt(n), ks(n); VectorType s(n), t(n); RealScalar tol2 = tol*tol*rhs_sqnorm; RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon(); Index i = 0; Index restarts = 0; while ( r.squaredNorm() > tol2 && i<maxIters ) { Scalar rho_old = rho; rho = r0.dot(r); if (abs(rho) < eps2*r0_sqnorm) { // The new residual vector became too orthogonal to the arbitrarily chosen direction r0 // Let's restart with a new r0: r = rhs - mat * x; r0 = r; rho = r0_sqnorm = r.squaredNorm(); if(restarts++ == 0) i = 0; } Scalar beta = (rho/rho_old) * (alpha / w); p = r + beta * (p - w * v); y = precond.solve(p); v.noalias() = mat * y; alpha = rho / r0.dot(v); s = r - alpha * v; z = precond.solve(s); t.noalias() = mat * z; RealScalar tmp = t.squaredNorm(); if(tmp>RealScalar(0)) w = t.dot(s) / tmp; else w = Scalar(0); x += alpha * y + w * z; r = s - w * t; ++i; } tol_error = sqrt(r.squaredNorm()/rhs_sqnorm); iters = i; return true; } } template< typename _MatrixType, typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> > class BiCGSTAB; namespace internal { template< typename _MatrixType, typename _Preconditioner> struct traits<BiCGSTAB<_MatrixType,_Preconditioner> > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A bi conjugate gradient stabilized solver for sparse square problems * * This class allows to solve for A.x = b sparse linear problems using a bi conjugate gradient * stabilized algorithm. The vectors x and b can be either dense or sparse. 
* * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner * * \implsparsesolverconcept * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits<Scalar>::epsilon() for the tolerance. * * The tolerance corresponds to the relative residual error: |Ax-b|/|b| * * \b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format. * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled. * See \ref TopicMultiThreading for details. * * This class can be used as the direct solver classes. Here is a typical usage example: * \include BiCGSTAB_simple.cpp * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * BiCGSTAB can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. * * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename _MatrixType, typename _Preconditioner> class BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> > { typedef IterativeSolverBase<BiCGSTAB> Base; using Base::matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; public: /** Default constructor. */ BiCGSTAB() : Base() {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} ~BiCGSTAB() {} /** \internal */ template<typename Rhs,typename Dest> void _solve_with_guess_impl(const Rhs& b, Dest& x) const { bool failed = false; for(Index j=0; j<b.cols(); ++j) { m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; typename Dest::ColXpr xj(x,j); if(!internal::bicgstab(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error)) failed = true; } m_info = failed ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence; m_isInitialized = true; } /** \internal */ using Base::_solve_impl; template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const { x.resize(this->rows(),b.cols()); x.setZero(); _solve_with_guess_impl(b,x); } protected: }; } // end namespace Eigen #endif // EIGEN_BICGSTAB_H
7,251
30.668122
121
h
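A minimal, self-contained usage sketch for the BiCGSTAB solver stored in the record above. The 2x2 system, the variable names and the tolerance value are illustrative assumptions, not part of the original file; only member functions visible in the source (compute, solve, setTolerance, iterations, error, info) are used.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>
#include <iostream>

int main()
{
  // Toy 2x2 system; a real use case would assemble a large sparse A from triplets.
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 4.0;  A.insert(0, 1) = 1.0;
  A.insert(1, 0) = 2.0;  A.insert(1, 1) = 3.0;
  A.makeCompressed();
  Eigen::VectorXd b(2);
  b << 1.0, 2.0;

  // BiCGSTAB with the default DiagonalPreconditioner.
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver;
  solver.setTolerance(1e-10);            // relative residual |Ax-b|/|b|
  solver.compute(A);                     // grab A and build the preconditioner
  Eigen::VectorXd x = solver.solve(b);   // iterations start from x = 0

  std::cout << "#iterations: " << solver.iterations()
            << "  estimated error: " << solver.error() << "\n";
  return solver.info() == Eigen::Success ? 0 : 1;
}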
abess
abess-master/python/include/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]> // Copyright (C) 2014 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_INCOMPLETE_LUT_H #define EIGEN_INCOMPLETE_LUT_H namespace Eigen { namespace internal { /** \internal * Compute a quick-sort split of a vector * On output, the vector row is permuted such that its elements satisfy * abs(row(i)) >= abs(row(ncut)) if i<ncut * abs(row(i)) <= abs(row(ncut)) if i>ncut * \param row The vector of values * \param ind The array of index for the elements in @p row * \param ncut The number of largest elements to keep **/ template <typename VectorV, typename VectorI> Index QuickSplit(VectorV &row, VectorI &ind, Index ncut) { typedef typename VectorV::RealScalar RealScalar; using std::swap; using std::abs; Index mid; Index n = row.size(); /* length of the vector */ Index first, last ; ncut--; /* to fit the zero-based indices */ first = 0; last = n-1; if (ncut < first || ncut > last ) return 0; do { mid = first; RealScalar abskey = abs(row(mid)); for (Index j = first + 1; j <= last; j++) { if ( abs(row(j)) > abskey) { ++mid; swap(row(mid), row(j)); swap(ind(mid), ind(j)); } } /* Interchange for the pivot element */ swap(row(mid), row(first)); swap(ind(mid), ind(first)); if (mid > ncut) last = mid - 1; else if (mid < ncut ) first = mid + 1; } while (mid != ncut ); return 0; /* mid is equal to ncut */ } }// end namespace internal /** \ingroup IterativeLinearSolvers_Module * \class IncompleteLUT * \brief Incomplete LU factorization with dual-threshold strategy * * \implsparsesolverconcept * * During the numerical factorization, two dropping rules are used : * 1) any element whose magnitude is less than some tolerance is dropped. * This tolerance is obtained by multiplying the input tolerance @p droptol * by the average magnitude of all the original elements in the current row. * 2) After the elimination of the row, only the @p fill largest elements in * the L part and the @p fill largest elements in the U part are kept * (in addition to the diagonal element ). Note that @p fill is computed from * the input parameter @p fillfactor which is used the ratio to control the fill_in * relatively to the initial number of nonzero elements. * * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements) * and when @p fill=n/2 with @p droptol being different to zero. * * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, * Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994. * * NOTE : The following implementation is derived from the ILUT implementation * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota * released under the terms of the GNU LGPL: * http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2. 
* See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012: * http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html * alternatively, on GMANE: * http://comments.gmane.org/gmane.comp.lib.eigen/3302 */ template <typename _Scalar, typename _StorageIndex = int> class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageIndex> > { protected: typedef SparseSolverBase<IncompleteLUT> Base; using Base::m_isInitialized; public: typedef _Scalar Scalar; typedef _StorageIndex StorageIndex; typedef typename NumTraits<Scalar>::Real RealScalar; typedef Matrix<Scalar,Dynamic,1> Vector; typedef Matrix<StorageIndex,Dynamic,1> VectorI; typedef SparseMatrix<Scalar,RowMajor,StorageIndex> FactorType; enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic }; public: IncompleteLUT() : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10), m_analysisIsOk(false), m_factorizationIsOk(false) {} template<typename MatrixType> explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10) : m_droptol(droptol),m_fillfactor(fillfactor), m_analysisIsOk(false),m_factorizationIsOk(false) { eigen_assert(fillfactor != 0); compute(mat); } Index rows() const { return m_lu.rows(); } Index cols() const { return m_lu.cols(); } /** \brief Reports whether previous computation was successful. * * \returns \c Success if computation was succesful, * \c NumericalIssue if the matrix.appears to be negative. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "IncompleteLUT is not initialized."); return m_info; } template<typename MatrixType> void analyzePattern(const MatrixType& amat); template<typename MatrixType> void factorize(const MatrixType& amat); /** * Compute an incomplete LU factorization with dual threshold on the matrix mat * No pivoting is done in this version * **/ template<typename MatrixType> IncompleteLUT& compute(const MatrixType& amat) { analyzePattern(amat); factorize(amat); return *this; } void setDroptol(const RealScalar& droptol); void setFillfactor(int fillfactor); template<typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { x = m_Pinv * b; x = m_lu.template triangularView<UnitLower>().solve(x); x = m_lu.template triangularView<Upper>().solve(x); x = m_P * x; } protected: /** keeps off-diagonal entries; drops diagonal entries */ struct keep_diag { inline bool operator() (const Index& row, const Index& col, const Scalar&) const { return row!=col; } }; protected: FactorType m_lu; RealScalar m_droptol; int m_fillfactor; bool m_analysisIsOk; bool m_factorizationIsOk; ComputationInfo m_info; PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P; // Fill-reducing permutation PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv; // Inverse permutation }; /** * Set control parameter droptol * \param droptol Drop any element whose magnitude is less than this tolerance **/ template<typename Scalar, typename StorageIndex> void IncompleteLUT<Scalar,StorageIndex>::setDroptol(const RealScalar& droptol) { this->m_droptol = droptol; } /** * Set control parameter fillfactor * \param fillfactor This is used to compute the number @p fill_in of largest elements to keep on each row. 
**/ template<typename Scalar, typename StorageIndex> void IncompleteLUT<Scalar,StorageIndex>::setFillfactor(int fillfactor) { this->m_fillfactor = fillfactor; } template <typename Scalar, typename StorageIndex> template<typename _MatrixType> void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat) { // Compute the Fill-reducing permutation // Since ILUT does not perform any numerical pivoting, // it is highly preferable to keep the diagonal through symmetric permutations. #ifndef EIGEN_MPL2_ONLY // To this end, let's symmetrize the pattern and perform AMD on it. SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat; SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose(); // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice. // on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered... SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1; AMDOrdering<StorageIndex> ordering; ordering(AtA,m_P); m_Pinv = m_P.inverse(); // cache the inverse permutation #else // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine. SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat; COLAMDOrdering<StorageIndex> ordering; ordering(mat1,m_Pinv); m_P = m_Pinv.inverse(); #endif m_analysisIsOk = true; m_factorizationIsOk = false; m_isInitialized = true; } template <typename Scalar, typename StorageIndex> template<typename _MatrixType> void IncompleteLUT<Scalar,StorageIndex>::factorize(const _MatrixType& amat) { using std::sqrt; using std::swap; using std::abs; using internal::convert_index; eigen_assert((amat.rows() == amat.cols()) && "The factorization should be done on a square matrix"); Index n = amat.cols(); // Size of the matrix m_lu.resize(n,n); // Declare Working vectors and variables Vector u(n) ; // real values of the row -- maximum size is n -- VectorI ju(n); // column position of the values in u -- maximum size is n VectorI jr(n); // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1 // Apply the fill-reducing permutation eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); SparseMatrix<Scalar,RowMajor, StorageIndex> mat; mat = amat.twistedBy(m_Pinv); // Initialization jr.fill(-1); ju.fill(0); u.fill(0); // number of largest elements to keep in each row: Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1; if (fill_in > n) fill_in = n; // number of largest nonzero elements to keep in the L and the U part of the current row: Index nnzL = fill_in/2; Index nnzU = nnzL; m_lu.reserve(n * (nnzL + nnzU + 1)); // global loop over the rows of the sparse matrix for (Index ii = 0; ii < n; ii++) { // 1 - copy the lower and the upper part of the row i of mat in the working vector u Index sizeu = 1; // number of nonzero elements in the upper part of the current row Index sizel = 0; // number of nonzero elements in the lower part of the current row ju(ii) = convert_index<StorageIndex>(ii); u(ii) = 0; jr(ii) = convert_index<StorageIndex>(ii); RealScalar rownorm = 0; typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii for (; j_it; ++j_it) { Index k = j_it.index(); if (k < ii) { // copy the lower part ju(sizel) = convert_index<StorageIndex>(k); u(sizel) = j_it.value(); jr(k) = convert_index<StorageIndex>(sizel); ++sizel; } else if (k == ii) { u(ii) = j_it.value(); } else { // copy the upper part Index jpos = ii + sizeu; ju(jpos) = convert_index<StorageIndex>(k); u(jpos) = 
j_it.value(); jr(k) = convert_index<StorageIndex>(jpos); ++sizeu; } rownorm += numext::abs2(j_it.value()); } // 2 - detect possible zero row if(rownorm==0) { m_info = NumericalIssue; return; } // Take the 2-norm of the current row as a relative tolerance rownorm = sqrt(rownorm); // 3 - eliminate the previous nonzero rows Index jj = 0; Index len = 0; while (jj < sizel) { // In order to eliminate in the correct order, // we must select first the smallest column index among ju(jj:sizel) Index k; Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment k += jj; if (minrow != ju(jj)) { // swap the two locations Index j = ju(jj); swap(ju(jj), ju(k)); jr(minrow) = convert_index<StorageIndex>(jj); jr(j) = convert_index<StorageIndex>(k); swap(u(jj), u(k)); } // Reset this location jr(minrow) = -1; // Start elimination typename FactorType::InnerIterator ki_it(m_lu, minrow); while (ki_it && ki_it.index() < minrow) ++ki_it; eigen_internal_assert(ki_it && ki_it.col()==minrow); Scalar fact = u(jj) / ki_it.value(); // drop too small elements if(abs(fact) <= m_droptol) { jj++; continue; } // linear combination of the current row ii and the row minrow ++ki_it; for (; ki_it; ++ki_it) { Scalar prod = fact * ki_it.value(); Index j = ki_it.index(); Index jpos = jr(j); if (jpos == -1) // fill-in element { Index newpos; if (j >= ii) // dealing with the upper part { newpos = ii + sizeu; sizeu++; eigen_internal_assert(sizeu<=n); } else // dealing with the lower part { newpos = sizel; sizel++; eigen_internal_assert(sizel<=ii); } ju(newpos) = convert_index<StorageIndex>(j); u(newpos) = -prod; jr(j) = convert_index<StorageIndex>(newpos); } else u(jpos) -= prod; } // store the pivot element u(len) = fact; ju(len) = convert_index<StorageIndex>(minrow); ++len; jj++; } // end of the elimination on the row ii // reset the upper part of the pointer jr to zero for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1; // 4 - partially sort and insert the elements in the m_lu matrix // sort the L-part of the row sizel = len; len = (std::min)(sizel, nnzL); typename Vector::SegmentReturnType ul(u.segment(0, sizel)); typename VectorI::SegmentReturnType jul(ju.segment(0, sizel)); internal::QuickSplit(ul, jul, len); // store the largest m_fill elements of the L part m_lu.startVec(ii); for(Index k = 0; k < len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); // store the diagonal element // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization) if (u(ii) == Scalar(0)) u(ii) = sqrt(m_droptol) * rownorm; m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii); // sort the U-part of the row // apply the dropping rule first len = 0; for(Index k = 1; k < sizeu; k++) { if(abs(u(ii+k)) > m_droptol * rownorm ) { ++len; u(ii + len) = u(ii + k); ju(ii + len) = ju(ii + k); } } sizeu = len + 1; // +1 to take into account the diagonal element len = (std::min)(sizeu, nnzU); typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1)); typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1)); internal::QuickSplit(uu, juu, len); // store the largest elements of the U part for(Index k = ii + 1; k < ii + len; k++) m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k); } m_lu.finalize(); m_lu.makeCompressed(); m_factorizationIsOk = true; m_info = Success; } } // end namespace Eigen #endif // EIGEN_INCOMPLETE_LUT_H
15,232
31.900648
134
h
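A hedged sketch of how the IncompleteLUT factorization above is typically used, namely as the preconditioner of an iterative solver such as the BiCGSTAB class from the earlier record. The droptol and fillfactor values are arbitrary examples, not recommendations.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

// Solve A*x = b with BiCGSTAB preconditioned by the dual-threshold ILUT.
Eigen::VectorXd solve_with_ilut(const Eigen::SparseMatrix<double>& A,
                                const Eigen::VectorXd& b)
{
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double>,
                  Eigen::IncompleteLUT<double> > solver;
  solver.preconditioner().setDroptol(1e-5);   // drop entries below droptol * row norm
  solver.preconditioner().setFillfactor(10);  // keep roughly fillfactor*nnz/n entries per row
  solver.compute(A);                          // runs analyzePattern() + factorize() of the ILUT
  return solver.solve(b);
}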
abess
abess-master/python/include/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2014 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_ITERATIVE_SOLVER_BASE_H #define EIGEN_ITERATIVE_SOLVER_BASE_H namespace Eigen { namespace internal { template<typename MatrixType> struct is_ref_compatible_impl { private: template <typename T0> struct any_conversion { template <typename T> any_conversion(const volatile T&); template <typename T> any_conversion(T&); }; struct yes {int a[1];}; struct no {int a[2];}; template<typename T> static yes test(const Ref<const T>&, int); template<typename T> static no test(any_conversion<T>, ...); public: static MatrixType ms_from; enum { value = sizeof(test<MatrixType>(ms_from, 0))==sizeof(yes) }; }; template<typename MatrixType> struct is_ref_compatible { enum { value = is_ref_compatible_impl<typename remove_all<MatrixType>::type>::value }; }; template<typename MatrixType, bool MatrixFree = !internal::is_ref_compatible<MatrixType>::value> class generic_matrix_wrapper; // We have an explicit matrix at hand, compatible with Ref<> template<typename MatrixType> class generic_matrix_wrapper<MatrixType,false> { public: typedef Ref<const MatrixType> ActualMatrixType; template<int UpLo> struct ConstSelfAdjointViewReturnType { typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType<UpLo>::Type Type; }; enum { MatrixFree = false }; generic_matrix_wrapper() : m_dummy(0,0), m_matrix(m_dummy) {} template<typename InputType> generic_matrix_wrapper(const InputType &mat) : m_matrix(mat) {} const ActualMatrixType& matrix() const { return m_matrix; } template<typename MatrixDerived> void grab(const EigenBase<MatrixDerived> &mat) { m_matrix.~Ref<const MatrixType>(); ::new (&m_matrix) Ref<const MatrixType>(mat.derived()); } void grab(const Ref<const MatrixType> &mat) { if(&(mat.derived()) != &m_matrix) { m_matrix.~Ref<const MatrixType>(); ::new (&m_matrix) Ref<const MatrixType>(mat); } } protected: MatrixType m_dummy; // used to default initialize the Ref<> object ActualMatrixType m_matrix; }; // MatrixType is not compatible with Ref<> -> matrix-free wrapper template<typename MatrixType> class generic_matrix_wrapper<MatrixType,true> { public: typedef MatrixType ActualMatrixType; template<int UpLo> struct ConstSelfAdjointViewReturnType { typedef ActualMatrixType Type; }; enum { MatrixFree = true }; generic_matrix_wrapper() : mp_matrix(0) {} generic_matrix_wrapper(const MatrixType &mat) : mp_matrix(&mat) {} const ActualMatrixType& matrix() const { return *mp_matrix; } void grab(const MatrixType &mat) { mp_matrix = &mat; } protected: const ActualMatrixType *mp_matrix; }; } /** \ingroup IterativeLinearSolvers_Module * \brief Base class for linear iterative solvers * * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner */ template< typename Derived> class IterativeSolverBase : public SparseSolverBase<Derived> { protected: typedef SparseSolverBase<Derived> Base; using Base::m_isInitialized; public: typedef typename internal::traits<Derived>::MatrixType MatrixType; typedef typename internal::traits<Derived>::Preconditioner Preconditioner; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::RealScalar RealScalar; enum { ColsAtCompileTime = 
MatrixType::ColsAtCompileTime, MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime }; public: using Base::derived; /** Default constructor. */ IterativeSolverBase() { init(); } /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> explicit IterativeSolverBase(const EigenBase<MatrixDerived>& A) : m_matrixWrapper(A.derived()) { init(); compute(matrix()); } ~IterativeSolverBase() {} /** Initializes the iterative solver for the sparsity pattern of the matrix \a A for further solving \c Ax=b problems. * * Currently, this function mostly calls analyzePattern on the preconditioner. In the future * we might, for instance, implement column reordering for faster matrix vector products. */ template<typename MatrixDerived> Derived& analyzePattern(const EigenBase<MatrixDerived>& A) { grab(A.derived()); m_preconditioner.analyzePattern(matrix()); m_isInitialized = true; m_analysisIsOk = true; m_info = m_preconditioner.info(); return derived(); } /** Initializes the iterative solver with the numerical values of the matrix \a A for further solving \c Ax=b problems. * * Currently, this function mostly calls factorize on the preconditioner. * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> Derived& factorize(const EigenBase<MatrixDerived>& A) { eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); grab(A.derived()); m_preconditioner.factorize(matrix()); m_factorizationIsOk = true; m_info = m_preconditioner.info(); return derived(); } /** Initializes the iterative solver with the matrix \a A for further solving \c Ax=b problems. * * Currently, this function mostly initializes/computes the preconditioner. In the future * we might, for instance, implement column reordering for faster matrix vector products. * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> Derived& compute(const EigenBase<MatrixDerived>& A) { grab(A.derived()); m_preconditioner.compute(matrix()); m_isInitialized = true; m_analysisIsOk = true; m_factorizationIsOk = true; m_info = m_preconditioner.info(); return derived(); } /** \internal */ Index rows() const { return matrix().rows(); } /** \internal */ Index cols() const { return matrix().cols(); } /** \returns the tolerance threshold used by the stopping criteria. * \sa setTolerance() */ RealScalar tolerance() const { return m_tolerance; } /** Sets the tolerance threshold used by the stopping criteria. * * This value is used as an upper bound to the relative residual error: |Ax-b|/|b|. 
* The default value is the machine precision given by NumTraits<Scalar>::epsilon() */ Derived& setTolerance(const RealScalar& tolerance) { m_tolerance = tolerance; return derived(); } /** \returns a read-write reference to the preconditioner for custom configuration. */ Preconditioner& preconditioner() { return m_preconditioner; } /** \returns a read-only reference to the preconditioner. */ const Preconditioner& preconditioner() const { return m_preconditioner; } /** \returns the max number of iterations. * It is either the value setted by setMaxIterations or, by default, * twice the number of columns of the matrix. */ Index maxIterations() const { return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations; } /** Sets the max number of iterations. * Default is twice the number of columns of the matrix. */ Derived& setMaxIterations(Index maxIters) { m_maxIterations = maxIters; return derived(); } /** \returns the number of iterations performed during the last solve */ Index iterations() const { eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); return m_iterations; } /** \returns the tolerance error reached during the last solve. * It is a close approximation of the true relative residual error |Ax-b|/|b|. */ RealScalar error() const { eigen_assert(m_isInitialized && "ConjugateGradient is not initialized."); return m_error; } /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A * and \a x0 as an initial solution. * * \sa solve(), compute() */ template<typename Rhs,typename Guess> inline const SolveWithGuess<Derived, Rhs, Guess> solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const { eigen_assert(m_isInitialized && "Solver is not initialized."); eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); return SolveWithGuess<Derived, Rhs, Guess>(derived(), b.derived(), x0); } /** \returns Success if the iterations converged, and NoConvergence otherwise. */ ComputationInfo info() const { eigen_assert(m_isInitialized && "IterativeSolverBase is not initialized."); return m_info; } /** \internal */ template<typename Rhs, typename DestDerived> void _solve_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const { eigen_assert(rows()==b.rows()); Index rhsCols = b.cols(); Index size = b.rows(); DestDerived& dest(aDest.derived()); typedef typename DestDerived::Scalar DestScalar; Eigen::Matrix<DestScalar,Dynamic,1> tb(size); Eigen::Matrix<DestScalar,Dynamic,1> tx(cols()); // We do not directly fill dest because sparse expressions have to be free of aliasing issue. // For non square least-square problems, b and dest might not have the same size whereas they might alias each-other. 
typename DestDerived::PlainObject tmp(cols(),rhsCols); for(Index k=0; k<rhsCols; ++k) { tb = b.col(k); tx = derived().solve(tb); tmp.col(k) = tx.sparseView(0); } dest.swap(tmp); } protected: void init() { m_isInitialized = false; m_analysisIsOk = false; m_factorizationIsOk = false; m_maxIterations = -1; m_tolerance = NumTraits<Scalar>::epsilon(); } typedef internal::generic_matrix_wrapper<MatrixType> MatrixWrapper; typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType; const ActualMatrixType& matrix() const { return m_matrixWrapper.matrix(); } template<typename InputType> void grab(const InputType &A) { m_matrixWrapper.grab(A); } MatrixWrapper m_matrixWrapper; Preconditioner m_preconditioner; Index m_maxIterations; RealScalar m_tolerance; mutable RealScalar m_error; mutable Index m_iterations; mutable ComputationInfo m_info; mutable bool m_analysisIsOk, m_factorizationIsOk; }; } // end namespace Eigen #endif // EIGEN_ITERATIVE_SOLVER_BASE_H
11,527
28.18481
121
h
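IterativeSolverBase above defines the interface shared by every solver in this module. Below is a small sketch of those common controls, reusing the BiCGSTAB class from the first record of this group; the tolerance and iteration cap are illustrative values only.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

// Exercises the IterativeSolverBase controls: setTolerance/setMaxIterations before the
// solve, iterations()/error()/info() afterwards, and a warm start via solveWithGuess().
Eigen::VectorXd iterative_solve(const Eigen::SparseMatrix<double>& A,
                                const Eigen::VectorXd& b,
                                const Eigen::VectorXd& x0)
{
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver;
  solver.setTolerance(1e-8);       // upper bound on the relative residual |Ax-b|/|b|
  solver.setMaxIterations(2000);   // default would be twice the number of columns
  solver.compute(A);               // keeps a reference to A, so A must outlive the solver
  Eigen::VectorXd x = solver.solveWithGuess(b, x0);
  if (solver.info() != Eigen::Success)
  {
    // NoConvergence: x holds the last iterate and solver.error() the reached residual.
  }
  return x;
}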
abess
abess-master/python/include/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H #define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H namespace Eigen { namespace internal { /** \internal Low-level conjugate gradient algorithm for least-square problems * \param mat The matrix A * \param rhs The right hand side vector b * \param x On input and initial solution, on output the computed solution. * \param precond A preconditioner being able to efficiently solve for an * approximation of A'Ax=b (regardless of b) * \param iters On input the max number of iteration, on output the number of performed iterations. * \param tol_error On input the tolerance error, on output an estimation of the relative error. */ template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> EIGEN_DONT_INLINE void least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x, const Preconditioner& precond, Index& iters, typename Dest::RealScalar& tol_error) { using std::sqrt; using std::abs; typedef typename Dest::RealScalar RealScalar; typedef typename Dest::Scalar Scalar; typedef Matrix<Scalar,Dynamic,1> VectorType; RealScalar tol = tol_error; Index maxIters = iters; Index m = mat.rows(), n = mat.cols(); VectorType residual = rhs - mat * x; VectorType normal_residual = mat.adjoint() * residual; RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm(); if(rhsNorm2 == 0) { x.setZero(); iters = 0; tol_error = 0; return; } RealScalar threshold = tol*tol*rhsNorm2; RealScalar residualNorm2 = normal_residual.squaredNorm(); if (residualNorm2 < threshold) { iters = 0; tol_error = sqrt(residualNorm2 / rhsNorm2); return; } VectorType p(n); p = precond.solve(normal_residual); // initial search direction VectorType z(n), tmp(m); RealScalar absNew = numext::real(normal_residual.dot(p)); // the square of the absolute value of r scaled by invM Index i = 0; while(i < maxIters) { tmp.noalias() = mat * p; Scalar alpha = absNew / tmp.squaredNorm(); // the amount we travel on dir x += alpha * p; // update solution residual -= alpha * tmp; // update residual normal_residual = mat.adjoint() * residual; // update residual of the normal equation residualNorm2 = normal_residual.squaredNorm(); if(residualNorm2 < threshold) break; z = precond.solve(normal_residual); // approximately solve for "A'A z = normal_residual" RealScalar absOld = absNew; absNew = numext::real(normal_residual.dot(z)); // update the absolute value of r RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction p = z + beta * p; // update search direction i++; } tol_error = sqrt(residualNorm2 / rhsNorm2); iters = i; } } template< typename _MatrixType, typename _Preconditioner = LeastSquareDiagonalPreconditioner<typename _MatrixType::Scalar> > class LeastSquaresConjugateGradient; namespace internal { template< typename _MatrixType, typename _Preconditioner> struct traits<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> > { typedef _MatrixType MatrixType; typedef _Preconditioner Preconditioner; }; } /** \ingroup IterativeLinearSolvers_Module * \brief A conjugate gradient solver for sparse (or dense) least-square problems * * This class allows to solve for A x = b 
linear problems using an iterative conjugate gradient algorithm. * The matrix A can be non symmetric and rectangular, but the matrix A' A should be positive-definite to guaranty stability. * Otherwise, the SparseLU or SparseQR classes might be preferable. * The matrix A and the vectors x and b can be either dense or sparse. * * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix. * \tparam _Preconditioner the type of the preconditioner. Default is LeastSquareDiagonalPreconditioner * * \implsparsesolverconcept * * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations * and NumTraits<Scalar>::epsilon() for the tolerance. * * This class can be used as the direct solver classes. Here is a typical usage example: \code int m=1000000, n = 10000; VectorXd x(n), b(m); SparseMatrix<double> A(m,n); // fill A and b LeastSquaresConjugateGradient<SparseMatrix<double> > lscg; lscg.compute(A); x = lscg.solve(b); std::cout << "#iterations: " << lscg.iterations() << std::endl; std::cout << "estimated error: " << lscg.error() << std::endl; // update b, and solve again x = lscg.solve(b); \endcode * * By default the iterations start with x=0 as an initial guess of the solution. * One can control the start using the solveWithGuess() method. * * \sa class ConjugateGradient, SparseLU, SparseQR */ template< typename _MatrixType, typename _Preconditioner> class LeastSquaresConjugateGradient : public IterativeSolverBase<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> > { typedef IterativeSolverBase<LeastSquaresConjugateGradient> Base; using Base::matrix; using Base::m_error; using Base::m_iterations; using Base::m_info; using Base::m_isInitialized; public: typedef _MatrixType MatrixType; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef _Preconditioner Preconditioner; public: /** Default constructor. */ LeastSquaresConjugateGradient() : Base() {} /** Initialize the solver with matrix \a A for further \c Ax=b solving. * * This constructor is a shortcut for the default constructor followed * by a call to compute(). * * \warning this class stores a reference to the matrix A as well as some * precomputed values that depend on it. Therefore, if \a A is changed * this class becomes invalid. Call compute() to update it with the new * matrix A, or modify a copy of A. */ template<typename MatrixDerived> explicit LeastSquaresConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {} ~LeastSquaresConjugateGradient() {} /** \internal */ template<typename Rhs,typename Dest> void _solve_with_guess_impl(const Rhs& b, Dest& x) const { m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; for(Index j=0; j<b.cols(); ++j) { m_iterations = Base::maxIterations(); m_error = Base::m_tolerance; typename Dest::ColXpr xj(x,j); internal::least_square_conjugate_gradient(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error); } m_isInitialized = true; m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; } /** \internal */ using Base::_solve_impl; template<typename Rhs,typename Dest> void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const { x.setZero(); _solve_with_guess_impl(b.derived(),x); } }; } // end namespace Eigen #endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H
7,762
34.774194
127
h
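The record above already embeds a usage example in its documentation; the sketch below wraps the same solver in a function. The warm-start call is an assumption about a typical workflow (repeated solves with a slowly varying right-hand side), not something prescribed by the source.

#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

// Minimize |A*x - b|^2 for a rectangular A without forming A'A explicitly.
Eigen::VectorXd least_squares(const Eigen::SparseMatrix<double>& A,
                              const Eigen::VectorXd& b,
                              const Eigen::VectorXd& b_updated)
{
  Eigen::LeastSquaresConjugateGradient<Eigen::SparseMatrix<double> > lscg;
  lscg.compute(A);                    // builds the LeastSquareDiagonalPreconditioner
  Eigen::VectorXd x = lscg.solve(b);  // starts from x = 0
  // When only the right-hand side changes, the previous x is usually a good guess:
  x = lscg.solveWithGuess(b_updated, x);
  return x;
}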
abess
abess-master/python/include/Eigen/src/LU/Determinant.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Benoit Jacob <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_DETERMINANT_H #define EIGEN_DETERMINANT_H namespace Eigen { namespace internal { template<typename Derived> inline const typename Derived::Scalar bruteforce_det3_helper (const MatrixBase<Derived>& matrix, int a, int b, int c) { return matrix.coeff(0,a) * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b)); } template<typename Derived> const typename Derived::Scalar bruteforce_det4_helper (const MatrixBase<Derived>& matrix, int j, int k, int m, int n) { return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1)) * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3)); } template<typename Derived, int DeterminantType = Derived::RowsAtCompileTime > struct determinant_impl { static inline typename traits<Derived>::Scalar run(const Derived& m) { if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0) return typename traits<Derived>::Scalar(1); return m.partialPivLu().determinant(); } }; template<typename Derived> struct determinant_impl<Derived, 1> { static inline typename traits<Derived>::Scalar run(const Derived& m) { return m.coeff(0,0); } }; template<typename Derived> struct determinant_impl<Derived, 2> { static inline typename traits<Derived>::Scalar run(const Derived& m) { return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1); } }; template<typename Derived> struct determinant_impl<Derived, 3> { static inline typename traits<Derived>::Scalar run(const Derived& m) { return bruteforce_det3_helper(m,0,1,2) - bruteforce_det3_helper(m,1,0,2) + bruteforce_det3_helper(m,2,0,1); } }; template<typename Derived> struct determinant_impl<Derived, 4> { static typename traits<Derived>::Scalar run(const Derived& m) { // trick by Martin Costabel to compute 4x4 det with only 30 muls return bruteforce_det4_helper(m,0,1,2,3) - bruteforce_det4_helper(m,0,2,1,3) + bruteforce_det4_helper(m,0,3,1,2) + bruteforce_det4_helper(m,1,2,0,3) - bruteforce_det4_helper(m,1,3,0,2) + bruteforce_det4_helper(m,2,3,0,1); } }; } // end namespace internal /** \lu_module * * \returns the determinant of this matrix */ template<typename Derived> inline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const { eigen_assert(rows() == cols()); typedef typename internal::nested_eval<Derived,Base::RowsAtCompileTime>::type Nested; return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived()); } } // end namespace Eigen #endif // EIGEN_DETERMINANT_H
3,057
28.980392
97
h
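A small sketch of the determinant() entry point defined above: fixed sizes 1 through 4 dispatch to the closed-form helpers, anything else falls back to a partial-pivoting LU. The numeric values are illustrative only.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m;      // size 3 at compile time: uses the bruteforce_det3_helper path
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 10;
  std::cout << "det(3x3) = " << m.determinant() << "\n";    // prints -3

  Eigen::MatrixXd big = Eigen::MatrixXd::Random(8, 8);      // dynamic size: PartialPivLU path
  std::cout << "det(8x8) = " << big.determinant() << "\n";
  return 0;
}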
abess
abess-master/python/include/Eigen/src/QR/HouseholderQR_LAPACKE.h
/* Copyright (c) 2011, Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************** * Content : Eigen bindings to LAPACKe * Householder QR decomposition of a matrix w/o pivoting based on * LAPACKE_?geqrf function. ******************************************************************************** */ #ifndef EIGEN_QR_LAPACKE_H #define EIGEN_QR_LAPACKE_H namespace Eigen { namespace internal { /** \internal Specialization for the data types supported by LAPACKe */ #define EIGEN_LAPACKE_QR_NOPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \ template<typename MatrixQR, typename HCoeffs> \ struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \ { \ static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, \ typename MatrixQR::Scalar* = 0) \ { \ lapack_int m = (lapack_int) mat.rows(); \ lapack_int n = (lapack_int) mat.cols(); \ lapack_int lda = (lapack_int) mat.outerStride(); \ lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ LAPACKE_##LAPACKE_PREFIX##geqrf( matrix_order, m, n, (LAPACKE_TYPE*)mat.data(), lda, (LAPACKE_TYPE*)hCoeffs.data()); \ hCoeffs.adjointInPlace(); \ } \ }; EIGEN_LAPACKE_QR_NOPIV(double, double, d) EIGEN_LAPACKE_QR_NOPIV(float, float, s) EIGEN_LAPACKE_QR_NOPIV(dcomplex, lapack_complex_double, z) EIGEN_LAPACKE_QR_NOPIV(scomplex, lapack_complex_float, c) } // end namespace internal } // end namespace Eigen #endif // EIGEN_QR_LAPACKE_H
2,993
42.391304
122
h
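The binding above swaps the kernel of Eigen's HouseholderQR for LAPACKE_?geqrf; user-level code does not change. A hedged sketch of that user-level code follows, under the assumption (stated in the comment) that the translation unit is built with EIGEN_USE_LAPACKE and linked against a LAPACKE/BLAS implementation; without the macro the pure Eigen kernel is used and the behaviour is the same.

// Build with -DEIGEN_USE_LAPACKE and link a LAPACKE/BLAS library to route the
// factorization below through LAPACKE_dgeqrf (the double case of the macro above).
#include <Eigen/Dense>

Eigen::VectorXd qr_solve(const Eigen::MatrixXd& A, const Eigen::VectorXd& b)
{
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(A);  // QR without pivoting
  return qr.solve(b);   // for a tall, full-rank A this is a least-squares solution
}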
abess
abess-master/python/include/Eigen/src/SVD/UpperBidiagonalization.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Benoit Jacob <[email protected]> // Copyright (C) 2013-2014 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_BIDIAGONALIZATION_H #define EIGEN_BIDIAGONALIZATION_H namespace Eigen { namespace internal { // UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API. // At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class. template<typename _MatrixType> class UpperBidiagonalization { public: typedef _MatrixType MatrixType; enum { RowsAtCompileTime = MatrixType::RowsAtCompileTime, ColsAtCompileTime = MatrixType::ColsAtCompileTime, ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret }; typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3 typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType; typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType; typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType; typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType; typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType; typedef HouseholderSequence< const MatrixType, const typename internal::remove_all<typename Diagonal<const MatrixType,0>::ConjugateReturnType>::type > HouseholderUSequenceType; typedef HouseholderSequence< const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type, Diagonal<const MatrixType,1>, OnTheRight > HouseholderVSequenceType; /** * \brief Default Constructor. * * The default constructor is useful in cases in which the user intends to * perform decompositions via Bidiagonalization::compute(const MatrixType&). 
*/ UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {} explicit UpperBidiagonalization(const MatrixType& matrix) : m_householder(matrix.rows(), matrix.cols()), m_bidiagonal(matrix.cols(), matrix.cols()), m_isInitialized(false) { compute(matrix); } UpperBidiagonalization& compute(const MatrixType& matrix); UpperBidiagonalization& computeUnblocked(const MatrixType& matrix); const MatrixType& householder() const { return m_householder; } const BidiagonalType& bidiagonal() const { return m_bidiagonal; } const HouseholderUSequenceType householderU() const { eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate()); } const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy { eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized."); return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>()) .setLength(m_householder.cols()-1) .setShift(1); } protected: MatrixType m_householder; BidiagonalType m_bidiagonal; bool m_isInitialized; }; // Standard upper bidiagonalization without fancy optimizations // This version should be faster for small matrix size template<typename MatrixType> void upperbidiagonalization_inplace_unblocked(MatrixType& mat, typename MatrixType::RealScalar *diagonal, typename MatrixType::RealScalar *upper_diagonal, typename MatrixType::Scalar* tempData = 0) { typedef typename MatrixType::Scalar Scalar; Index rows = mat.rows(); Index cols = mat.cols(); typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType; TempType tempVector; if(tempData==0) { tempVector.resize(rows); tempData = tempVector.data(); } for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k) { Index remainingRows = rows - k; Index remainingCols = cols - k - 1; // construct left householder transform in-place in A mat.col(k).tail(remainingRows) .makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]); // apply householder transform to remaining part of A on the left mat.bottomRightCorner(remainingRows, remainingCols) .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData); if(k == cols-1) break; // construct right householder transform in-place in mat mat.row(k).tail(remainingCols) .makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]); // apply householder transform to remaining part of mat on the left mat.bottomRightCorner(remainingRows-1, remainingCols) .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData); } } /** \internal * Helper routine for the block reduction to upper bidiagonal form. * * Let's partition the matrix A: * * | A00 A01 | * A = | | * | A10 A11 | * * This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10] * and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11 * is updated using matrix-matrix products: * A22 -= V * Y^T - X * U^T * where V and U contains the left and right Householder vectors. U and V are stored in A10, and A01 * respectively, and the update matrices X and Y are computed during the reduction. 
* */ template<typename MatrixType> void upperbidiagonalization_blocked_helper(MatrixType& A, typename MatrixType::RealScalar *diagonal, typename MatrixType::RealScalar *upper_diagonal, Index bs, Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, traits<MatrixType>::Flags & RowMajorBit> > X, Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, traits<MatrixType>::Flags & RowMajorBit> > Y) { typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::RealScalar RealScalar; typedef typename NumTraits<RealScalar>::Literal Literal; enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit }; typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride; typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride; typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride> SubColumnType; typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride> SubRowType; typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType; Index brows = A.rows(); Index bcols = A.cols(); Scalar tau_u, tau_u_prev(0), tau_v; for(Index k = 0; k < bs; ++k) { Index remainingRows = brows - k; Index remainingCols = bcols - k - 1; SubMatType X_k1( X.block(k,0, remainingRows,k) ); SubMatType V_k1( A.block(k,0, remainingRows,k) ); // 1 - update the k-th column of A SubColumnType v_k = A.col(k).tail(remainingRows); v_k -= V_k1 * Y.row(k).head(k).adjoint(); if(k) v_k -= X_k1 * A.col(k).head(k); // 2 - construct left Householder transform in-place v_k.makeHouseholderInPlace(tau_v, diagonal[k]); if(k+1<bcols) { SubMatType Y_k ( Y.block(k+1,0, remainingCols, k+1) ); SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) ); // this eases the application of Householder transforAions // A(k,k) will store tau_v later A(k,k) = Scalar(1); // 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k ) { SubColumnType y_k( Y.col(k).tail(remainingCols) ); // let's use the begining of column k of Y as a temporary vector SubColumnType tmp( Y.col(k).head(k) ); y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck tmp.noalias() = V_k1.adjoint() * v_k; y_k.noalias() -= Y_k.leftCols(k) * tmp; tmp.noalias() = X_k1.adjoint() * v_k; y_k.noalias() -= U_k1.adjoint() * tmp; y_k *= numext::conj(tau_v); } // 4 - update k-th row of A (it will become u_k) SubRowType u_k( A.row(k).tail(remainingCols) ); u_k = u_k.conjugate(); { u_k -= Y_k * A.row(k).head(k+1).adjoint(); if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint(); } // 5 - construct right Householder transform in-place u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]); // this eases the application of Householder transformations // A(k,k+1) will store tau_u later A(k,k+1) = Scalar(1); // 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k ) { SubColumnType x_k ( X.col(k).tail(remainingRows-1) ); // let's use the begining of column k of X as a temporary vectors // note that tmp0 and tmp1 overlaps SubColumnType tmp0 ( X.col(k).head(k) ), tmp1 ( X.col(k).head(k+1) ); x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck tmp0.noalias() = U_k1 * u_k.transpose(); x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0; tmp1.noalias() = Y_k.adjoint() * u_k.transpose(); x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1; x_k *= numext::conj(tau_u); tau_u = numext::conj(tau_u); u_k = u_k.conjugate(); } if(k>0) A.coeffRef(k-1,k) = tau_u_prev; tau_u_prev = tau_u; } else A.coeffRef(k-1,k) = 
tau_u_prev; A.coeffRef(k,k) = tau_v; } if(bs<bcols) A.coeffRef(bs-1,bs) = tau_u_prev; // update A22 if(bcols>bs && brows>bs) { SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) ); SubMatType A10( A.block(bs,0, brows-bs,bs) ); SubMatType A01( A.block(0,bs, bs,bcols-bs) ); Scalar tmp = A01(bs-1,0); A01(bs-1,0) = Literal(1); A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint(); A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01; A01(bs-1,0) = tmp; } } /** \internal * * Implementation of a block-bidiagonal reduction. * It is based on the following paper: * The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form. * by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995) * section 3.3 */ template<typename MatrixType, typename BidiagType> void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal, Index maxBlockSize=32, typename MatrixType::Scalar* /*tempData*/ = 0) { typedef typename MatrixType::Scalar Scalar; typedef Block<MatrixType,Dynamic,Dynamic> BlockType; Index rows = A.rows(); Index cols = A.cols(); Index size = (std::min)(rows, cols); // X and Y are work space enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit }; Matrix<Scalar, MatrixType::RowsAtCompileTime, Dynamic, StorageOrder, MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize); Matrix<Scalar, MatrixType::ColsAtCompileTime, Dynamic, StorageOrder, MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize); Index blockSize = (std::min)(maxBlockSize,size); Index k = 0; for(k = 0; k < size; k += blockSize) { Index bs = (std::min)(size-k,blockSize); // actual size of the block Index brows = rows - k; // rows of the block Index bcols = cols - k; // columns of the block // partition the matrix A: // // | A00 A01 A02 | // | | // A = | A10 A11 A12 | // | | // | A20 A21 A22 | // // where A11 is a bs x bs diagonal block, // and let: // | A11 A12 | // B = | | // | A21 A22 | BlockType B = A.block(k,k,brows,bcols); // This stage performs the bidiagonalization of A11, A21, A12, and updating of A22. // Finally, the algorithm continue on the updated A22. 
// // However, if B is too small, or A22 empty, then let's use an unblocked strategy if(k+bs==cols || bcols<48) // somewhat arbitrary threshold { upperbidiagonalization_inplace_unblocked(B, &(bidiagonal.template diagonal<0>().coeffRef(k)), &(bidiagonal.template diagonal<1>().coeffRef(k)), X.data() ); break; // We're done } else { upperbidiagonalization_blocked_helper<BlockType>( B, &(bidiagonal.template diagonal<0>().coeffRef(k)), &(bidiagonal.template diagonal<1>().coeffRef(k)), bs, X.topLeftCorner(brows,bs), Y.topLeftCorner(bcols,bs) ); } } } template<typename _MatrixType> UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix) { Index rows = matrix.rows(); Index cols = matrix.cols(); EIGEN_ONLY_USED_FOR_DEBUG(cols); eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols."); m_householder = matrix; ColVectorType temp(rows); upperbidiagonalization_inplace_unblocked(m_householder, &(m_bidiagonal.template diagonal<0>().coeffRef(0)), &(m_bidiagonal.template diagonal<1>().coeffRef(0)), temp.data()); m_isInitialized = true; return *this; } template<typename _MatrixType> UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix) { Index rows = matrix.rows(); Index cols = matrix.cols(); EIGEN_ONLY_USED_FOR_DEBUG(rows); EIGEN_ONLY_USED_FOR_DEBUG(cols); eigen_assert(rows >= cols && "UpperBidiagonalization is only for matrices satisfying rows>=cols."); m_householder = matrix; upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal); m_isInitialized = true; return *this; } #if 0 /** \return the Householder QR decomposition of \c *this. * * \sa class Bidiagonalization */ template<typename Derived> const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject> MatrixBase<Derived>::bidiagonalization() const { return UpperBidiagonalization<PlainObject>(eval()); } #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_BIDIAGONALIZATION_H
15,957
37.453012
128
h
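As the comment at the top of the record says, UpperBidiagonalization lives in Eigen::internal and is not stable API; the sketch below is therefore only an illustration of what the factorization produces (A = U * B * V^* with B upper bidiagonal), assuming a tall matrix with rows >= cols and that BandMatrix::toDenseMatrix() is available to expand the compact bidiagonal storage.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);   // rows >= cols is required

  Eigen::internal::UpperBidiagonalization<Eigen::MatrixXd> ubd(A);

  // Embed the cols x cols bidiagonal factor into a rows x cols matrix.
  Eigen::MatrixXd B = Eigen::MatrixXd::Zero(A.rows(), A.cols());
  B.topRows(A.cols()) = ubd.bidiagonal().toDenseMatrix();

  // Recompose A from the two Householder sequences and the bidiagonal factor.
  Eigen::MatrixXd recomposed = ubd.householderU() * B * ubd.householderV().adjoint();
  std::cout << "reconstruction error: " << (recomposed - A).norm() << "\n";
  return 0;
}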
abess
abess-master/python/include/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2012 Gael Guennebaud <[email protected]> /* NOTE: these functions have been adapted from the LDL library: LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved. LDL License: Your use or distribution of LDL or any modified version of LDL implies that you agree to this License. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Permission is hereby granted to use or copy this program under the terms of the GNU LGPL, provided that the Copyright, this License, and the Availability of the original version is retained on all copies. User documentation of any code that uses this code or any modified version of this code must cite the Copyright, this License, the Availability note, and "Used by permission." Permission to modify the code and to distribute modified code is granted, provided the Copyright, this License, and the Availability note are retained, and a notice that the code was modified is included. */ #include "../Core/util/NonMPL2.h" #ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H #define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H namespace Eigen { template<typename Derived> void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT) { const StorageIndex size = StorageIndex(ap.rows()); m_matrix.resize(size, size); m_parent.resize(size); m_nonZerosPerCol.resize(size); ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); for(StorageIndex k = 0; k < size; ++k) { /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ m_parent[k] = -1; /* parent of k is not yet known */ tags[k] = k; /* mark node k as visited */ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) { StorageIndex i = it.index(); if(i < k) { /* follow path from i to root of etree, stop at flagged node */ for(; tags[i] != k; i = m_parent[i]) { /* find parent of i if not yet determined */ if (m_parent[i] == -1) m_parent[i] = k; m_nonZerosPerCol[i]++; /* L (k,i) is nonzero */ tags[i] = k; /* mark i as visited */ } } } } /* construct Lp index array from m_nonZerosPerCol column counts */ StorageIndex* Lp = m_matrix.outerIndexPtr(); Lp[0] = 0; for(StorageIndex k = 0; k < size; ++k) Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ?
0 : 1); m_matrix.resizeNonZeros(Lp[size]); m_isInitialized = true; m_info = Success; m_analysisIsOk = true; m_factorizationIsOk = false; } template<typename Derived> template<bool DoLDLT> void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap) { using std::sqrt; eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(ap.rows()==ap.cols()); eigen_assert(m_parent.size()==ap.rows()); eigen_assert(m_nonZerosPerCol.size()==ap.rows()); const StorageIndex size = StorageIndex(ap.rows()); const StorageIndex* Lp = m_matrix.outerIndexPtr(); StorageIndex* Li = m_matrix.innerIndexPtr(); Scalar* Lx = m_matrix.valuePtr(); ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0); ei_declare_aligned_stack_constructed_variable(StorageIndex, pattern, size, 0); ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0); bool ok = true; m_diag.resize(DoLDLT ? size : 0); for(StorageIndex k = 0; k < size; ++k) { // compute nonzero pattern of kth row of L, in topological order y[k] = 0.0; // Y(0:k) is now all zero StorageIndex top = size; // stack for pattern is empty tags[k] = k; // mark node k as visited m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) { StorageIndex i = it.index(); if(i <= k) { y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ Index len; for(len = 0; tags[i] != k; i = m_parent[i]) { pattern[len++] = i; /* L(k,i) is nonzero */ tags[i] = k; /* mark i as visited */ } while(len > 0) pattern[--top] = pattern[--len]; } } /* compute numerical values kth row of L (a sparse triangular solve) */ RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset; // get D(k,k), apply the shift function, and clear Y(k) y[k] = 0.0; for(; top < size; ++top) { Index i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ Scalar yi = y[i]; /* get and clear Y(i) */ y[i] = 0.0; /* the nonzero entry L(k,i) */ Scalar l_ki; if(DoLDLT) l_ki = yi / m_diag[i]; else yi = l_ki = yi / Lx[Lp[i]]; Index p2 = Lp[i] + m_nonZerosPerCol[i]; Index p; for(p = Lp[i] + (DoLDLT ? 0 : 1); p < p2; ++p) y[Li[p]] -= numext::conj(Lx[p]) * yi; d -= numext::real(l_ki * numext::conj(yi)); Li[p] = k; /* store L(k,i) in column form of L */ Lx[p] = l_ki; ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ } if(DoLDLT) { m_diag[k] = d; if(d == RealScalar(0)) { ok = false; /* failure, D(k,k) is zero */ break; } } else { Index p = Lp[k] + m_nonZerosPerCol[k]++; Li[p] = k ; /* store L(k,k) = sqrt (d) in column k */ if(d <= RealScalar(0)) { ok = false; /* failure, matrix is not positive definite */ break; } Lx[p] = sqrt(d) ; } } m_info = ok ? Success : NumericalIssue; m_factorizationIsOk = true; } } // end namespace Eigen #endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H
6,898
33.495
128
h
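analyzePattern_preordered and factorize_preordered above are the kernels behind the public SimplicialLLT/SimplicialLDLT classes of the SparseCholesky module. Below is a hedged usage sketch of those public classes, assuming a symmetric positive-definite sparse matrix; per the NonMPL2.h include and the LDL notice, this code path is unavailable when EIGEN_MPL2_ONLY is defined.

#include <Eigen/Sparse>
#include <Eigen/SparseCholesky>

// Direct sparse LDL^T solve; the symbolic step builds the elimination tree and column
// counts, the numeric step can be repeated for matrices with the same sparsity pattern.
Eigen::VectorXd cholesky_solve(const Eigen::SparseMatrix<double>& A,   // symmetric positive definite
                               const Eigen::VectorXd& b)
{
  Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > ldlt;
  ldlt.analyzePattern(A);   // fill-reducing ordering + analyzePattern_preordered
  ldlt.factorize(A);        // factorize_preordered on the permuted matrix
  if (ldlt.info() != Eigen::Success)
    return Eigen::VectorXd();   // NumericalIssue, e.g. a zero pivot in D
  return ldlt.solve(b);
}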
abess
abess-master/python/include/Eigen/src/SparseCore/CompressedStorage.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_COMPRESSED_STORAGE_H #define EIGEN_COMPRESSED_STORAGE_H namespace Eigen { namespace internal { /** \internal * Stores a sparse set of values as a list of values and a list of indices. * */ template<typename _Scalar,typename _StorageIndex> class CompressedStorage { public: typedef _Scalar Scalar; typedef _StorageIndex StorageIndex; protected: typedef typename NumTraits<Scalar>::Real RealScalar; public: CompressedStorage() : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) {} explicit CompressedStorage(Index size) : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) { resize(size); } CompressedStorage(const CompressedStorage& other) : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) { *this = other; } CompressedStorage& operator=(const CompressedStorage& other) { resize(other.size()); if(other.size()>0) { internal::smart_copy(other.m_values, other.m_values + m_size, m_values); internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices); } return *this; } void swap(CompressedStorage& other) { std::swap(m_values, other.m_values); std::swap(m_indices, other.m_indices); std::swap(m_size, other.m_size); std::swap(m_allocatedSize, other.m_allocatedSize); } ~CompressedStorage() { delete[] m_values; delete[] m_indices; } void reserve(Index size) { Index newAllocatedSize = m_size + size; if (newAllocatedSize > m_allocatedSize) reallocate(newAllocatedSize); } void squeeze() { if (m_allocatedSize>m_size) reallocate(m_size); } void resize(Index size, double reserveSizeFactor = 0) { if (m_allocatedSize<size) { Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(), size + Index(reserveSizeFactor*double(size))); if(realloc_size<size) internal::throw_std_bad_alloc(); reallocate(realloc_size); } m_size = size; } void append(const Scalar& v, Index i) { Index id = m_size; resize(m_size+1, 1); m_values[id] = v; m_indices[id] = internal::convert_index<StorageIndex>(i); } inline Index size() const { return m_size; } inline Index allocatedSize() const { return m_allocatedSize; } inline void clear() { m_size = 0; } const Scalar* valuePtr() const { return m_values; } Scalar* valuePtr() { return m_values; } const StorageIndex* indexPtr() const { return m_indices; } StorageIndex* indexPtr() { return m_indices; } inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; } inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; } inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; } inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; } /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */ inline Index searchLowerIndex(Index key) const { return searchLowerIndex(0, m_size, key); } /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */ inline Index searchLowerIndex(Index start, Index end, Index key) const { while(end>start) { Index mid = (end+start)>>1; if (m_indices[mid]<key) start = mid+1; else end = mid; } return start; } /** \returns the stored 
value at index \a key * If the value does not exist, then the value \a defaultValue is returned without any insertion. */ inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const { if (m_size==0) return defaultValue; else if (key==m_indices[m_size-1]) return m_values[m_size-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) const Index id = searchLowerIndex(0,m_size-1,key); return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue; } /** Like at(), but the search is performed in the range [start,end) */ inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const { if (start>=end) return defaultValue; else if (end>start && key==m_indices[end-1]) return m_values[end-1]; // ^^ optimization: let's first check if it is the last coefficient // (very common in high level algorithms) const Index id = searchLowerIndex(start,end-1,key); return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue; } /** \returns a reference to the value at index \a key * If the value does not exist, then the value \a defaultValue is inserted * such that the keys are sorted. */ inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0)) { Index id = searchLowerIndex(0,m_size,key); if (id>=m_size || m_indices[id]!=key) { if (m_allocatedSize<m_size+1) { m_allocatedSize = 2*(m_size+1); internal::scoped_array<Scalar> newValues(m_allocatedSize); internal::scoped_array<StorageIndex> newIndices(m_allocatedSize); // copy first chunk internal::smart_copy(m_values, m_values +id, newValues.ptr()); internal::smart_copy(m_indices, m_indices+id, newIndices.ptr()); // copy the rest if(m_size>id) { internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1); internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1); } std::swap(m_values,newValues.ptr()); std::swap(m_indices,newIndices.ptr()); } else if(m_size>id) { internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1); internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1); } m_size++; m_indices[id] = internal::convert_index<StorageIndex>(key); m_values[id] = defaultValue; } return m_values[id]; } void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) { Index k = 0; Index n = size(); for (Index i=0; i<n; ++i) { if (!internal::isMuchSmallerThan(value(i), reference, epsilon)) { value(k) = value(i); index(k) = index(i); ++k; } } resize(k,0); } protected: inline void reallocate(Index size) { #ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN #endif eigen_internal_assert(size!=m_allocatedSize); internal::scoped_array<Scalar> newValues(size); internal::scoped_array<StorageIndex> newIndices(size); Index copySize = (std::min)(size, m_size); if (copySize>0) { internal::smart_copy(m_values, m_values+copySize, newValues.ptr()); internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr()); } std::swap(m_values,newValues.ptr()); std::swap(m_indices,newIndices.ptr()); m_allocatedSize = size; } protected: Scalar* m_values; StorageIndex* m_indices; Index m_size; Index m_allocatedSize; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_COMPRESSED_STORAGE_H
8,164
30.525097
130
h
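The CompressedStorage record above centers on searchLowerIndex(), a lower-bound binary search over the sorted index array, which at() combines with a quick check of the last stored coefficient before falling back to the search. The standalone sketch below mirrors that lookup on plain std::vector buffers; it is an illustration of the technique only, not Eigen's internal class, and the struct and field names are invented.

#include <cstdio>
#include <vector>

// Illustrative re-creation of the lookup strategy: two parallel, index-sorted arrays.
struct MiniStorage {
  std::vector<int>    indices;   // sorted inner indices
  std::vector<double> values;    // values stored at the same positions

  // Largest k such that indices[j] < key for all j in [0, k) -- the lower-bound search.
  std::size_t searchLowerIndex(int key) const {
    std::size_t start = 0, end = indices.size();
    while (end > start) {
      const std::size_t mid = (start + end) / 2;
      if (indices[mid] < key) start = mid + 1;
      else                    end = mid;
    }
    return start;
  }

  // at(): check the last stored coefficient first, then binary-search;
  // missing keys fall back to defaultValue, as in the captured at().
  double at(int key, double defaultValue = 0.0) const {
    if (indices.empty())       return defaultValue;
    if (key == indices.back()) return values.back();
    const std::size_t id = searchLowerIndex(key);
    return (id < indices.size() && indices[id] == key) ? values[id] : defaultValue;
  }
};

int main() {
  MiniStorage s;
  s.indices = {1, 4, 7};
  s.values  = {0.5, 2.0, -1.0};
  std::printf("%g %g\n", s.at(4), s.at(5, -99.0));  // prints "2 -99"
  return 0;
}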
abess
abess-master/python/include/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2015 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H #define EIGEN_SPARSE_CWISE_UNARY_OP_H namespace Eigen { namespace internal { template<typename UnaryOp, typename ArgType> struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased> : public evaluator_base<CwiseUnaryOp<UnaryOp,ArgType> > { public: typedef CwiseUnaryOp<UnaryOp, ArgType> XprType; class InnerIterator; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost, Flags = XprType::Flags }; explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } inline Index nonZerosEstimate() const { return m_argImpl.nonZerosEstimate(); } protected: typedef typename evaluator<ArgType>::InnerIterator EvalIterator; const UnaryOp m_functor; evaluator<ArgType> m_argImpl; }; template<typename UnaryOp, typename ArgType> class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator : public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator { typedef typename XprType::Scalar Scalar; typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base; public: EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor) {} EIGEN_STRONG_INLINE InnerIterator& operator++() { Base::operator++(); return *this; } EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); } protected: const UnaryOp m_functor; private: Scalar& valueRef(); }; template<typename ViewOp, typename ArgType> struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased> : public evaluator_base<CwiseUnaryView<ViewOp,ArgType> > { public: typedef CwiseUnaryView<ViewOp, ArgType> XprType; class InnerIterator; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost, Flags = XprType::Flags }; explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<ViewOp>::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } protected: typedef typename evaluator<ArgType>::InnerIterator EvalIterator; const ViewOp m_functor; evaluator<ArgType> m_argImpl; }; template<typename ViewOp, typename ArgType> class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator : public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator { typedef typename XprType::Scalar Scalar; typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base; public: EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor) {} EIGEN_STRONG_INLINE InnerIterator& operator++() { Base::operator++(); return *this; } EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); } EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); } protected: const ViewOp m_functor; }; } // end namespace internal 
template<typename Derived> EIGEN_STRONG_INLINE Derived& SparseMatrixBase<Derived>::operator*=(const Scalar& other) { typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator; internal::evaluator<Derived> thisEval(derived()); for (Index j=0; j<outerSize(); ++j) for (EvalIterator i(thisEval,j); i; ++i) i.valueRef() *= other; return derived(); } template<typename Derived> EIGEN_STRONG_INLINE Derived& SparseMatrixBase<Derived>::operator/=(const Scalar& other) { typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator; internal::evaluator<Derived> thisEval(derived()); for (Index j=0; j<outerSize(); ++j) for (EvalIterator i(thisEval,j); i; ++i) i.valueRef() /= other; return derived(); } } // end namespace Eigen #endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
4,711
30.624161
107
h
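The operator*= / operator/= overloads at the end of the SparseCwiseUnaryOp.h record scale every stored coefficient in place by walking the evaluator's inner iterators column by column. A minimal usage sketch of that public behaviour, assuming a standard Eigen 3.x header layout; the matrix size and values are arbitrary.

#include <Eigen/SparseCore>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(2, 2);
  A.insert(0, 0) = 1.0;
  A.insert(1, 1) = 3.0;
  A.makeCompressed();

  A *= 2.0;   // scales every stored coefficient in place
  A /= 4.0;   // likewise for division

  std::cout << A.coeff(0, 0) << " " << A.coeff(1, 1) << "\n";  // 0.5 1.5
  return 0;
}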
abess
abess-master/python/include/Eigen/src/SparseCore/SparseDot.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_DOT_H #define EIGEN_SPARSE_DOT_H namespace Eigen { template<typename Derived> template<typename OtherDerived> typename internal::traits<Derived>::Scalar SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(size() == other.size()); eigen_assert(other.size()>0 && "you are using a non initialized vector"); internal::evaluator<Derived> thisEval(derived()); typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0); Scalar res(0); while (i) { res += numext::conj(i.value()) * other.coeff(i.index()); ++i; } return res; } template<typename Derived> template<typename OtherDerived> typename internal::traits<Derived>::Scalar SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const { EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value), YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) eigen_assert(size() == other.size()); internal::evaluator<Derived> thisEval(derived()); typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0); internal::evaluator<OtherDerived> otherEval(other.derived()); typename internal::evaluator<OtherDerived>::InnerIterator j(otherEval, 0); Scalar res(0); while (i && j) { if (i.index()==j.index()) { res += numext::conj(i.value()) * j.value(); ++i; ++j; } else if (i.index()<j.index()) ++i; else ++j; } return res; } template<typename Derived> inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real SparseMatrixBase<Derived>::squaredNorm() const { return numext::real((*this).cwiseAbs2().sum()); } template<typename Derived> inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real SparseMatrixBase<Derived>::norm() const { using std::sqrt; return sqrt(squaredNorm()); } template<typename Derived> inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real SparseMatrixBase<Derived>::blueNorm() const { return internal::blueNorm_impl(*this); } } // end namespace Eigen #endif // EIGEN_SPARSE_DOT_H
3,080
30.121212
118
h
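Both dot() overloads in the SparseDot.h record assert equal vector sizes; the sparse-sparse version advances two sorted index streams in lockstep and only multiplies matching indices. A small usage sketch (vector sizes and values are arbitrary; the headers assume Eigen 3.x):

#include <Eigen/SparseCore>
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::SparseVector<double> a(5), b(5);
  a.insert(1) = 2.0;  a.insert(3) = -1.0;
  b.insert(3) = 4.0;  b.insert(4) = 5.0;

  Eigen::VectorXd d = Eigen::VectorXd::LinSpaced(5, 1.0, 5.0);  // 1 2 3 4 5

  std::cout << a.dot(b) << "\n";  // only index 3 overlaps: (-1) * 4 = -4
  std::cout << a.dot(d) << "\n";  // 2*2 + (-1)*4 = 0
  return 0;
}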
abess
abess-master/python/include/Eigen/src/SparseCore/SparseFuzzy.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_FUZZY_H #define EIGEN_SPARSE_FUZZY_H namespace Eigen { template<typename Derived> template<typename OtherDerived> bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const { const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived()); typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor), const typename internal::nested_eval<OtherDerived,2,PlainObject>::type, const PlainObject>::type actualB(other.derived()); return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm()); } } // end namespace Eigen #endif // EIGEN_SPARSE_FUZZY_H
1,107
35.933333
119
h
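isApprox() in SparseFuzzy.h compares (actualA - actualB).squaredNorm() against prec^2 times the smaller of the two squared norms. A short sketch, assuming the default double threshold (NumTraits<double>::dummy_precision(), roughly 1e-12); the perturbation 1e-13 is chosen only to land on either side of the two thresholds.

#include <Eigen/SparseCore>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(2, 2), B(2, 2);
  A.insert(0, 0) = 1.0;  B.insert(0, 0) = 1.0 + 1e-13;
  A.insert(1, 1) = 2.0;  B.insert(1, 1) = 2.0;

  std::cout << std::boolalpha
            << A.isApprox(B)        << "\n"   // true with the default precision
            << A.isApprox(B, 1e-16) << "\n";  // false with a much tighter precision
  return 0;
}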
abess
abess-master/python/include/Eigen/src/SparseCore/SparseRedux.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEREDUX_H #define EIGEN_SPARSEREDUX_H namespace Eigen { template<typename Derived> typename internal::traits<Derived>::Scalar SparseMatrixBase<Derived>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); Scalar res(0); internal::evaluator<Derived> thisEval(derived()); for (Index j=0; j<outerSize(); ++j) for (typename internal::evaluator<Derived>::InnerIterator iter(thisEval,j); iter; ++iter) res += iter.value(); return res; } template<typename _Scalar, int _Options, typename _Index> typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar SparseMatrix<_Scalar,_Options,_Index>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); if(this->isCompressed()) return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum(); else return Base::sum(); } template<typename _Scalar, int _Options, typename _Index> typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar SparseVector<_Scalar,_Options,_Index>::sum() const { eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum(); } } // end namespace Eigen #endif // EIGEN_SPARSEREDUX_H
1,699
33
93
h
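sum() in SparseRedux.h adds the stored coefficients only; the SparseMatrix specialization maps valuePtr() as a dense vector when the matrix is compressed. A quick sketch of the public call (values are arbitrary):

#include <Eigen/SparseCore>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(3, 3);
  A.insert(0, 0) = 1.0;
  A.insert(1, 2) = 2.5;
  A.insert(2, 1) = -0.5;
  A.makeCompressed();   // enables the fast path that maps valuePtr() directly

  std::cout << A.sum() << "\n";  // 3
  return 0;
}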
abess
abess-master/python/include/Eigen/src/SparseCore/SparseSolverBase.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSESOLVERBASE_H #define EIGEN_SPARSESOLVERBASE_H namespace Eigen { namespace internal { /** \internal * Helper functions to solve with a sparse right-hand-side and result. * The rhs is decomposed into small vertical panels which are solved through dense temporaries. */ template<typename Decomposition, typename Rhs, typename Dest> typename enable_if<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>::type solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest) { EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); typedef typename Dest::Scalar DestScalar; // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix. static const Index NbColsAtOnce = 4; Index rhsCols = rhs.cols(); Index size = rhs.rows(); // the temporary matrices do not need more columns than NbColsAtOnce: Index tmpCols = (std::min)(rhsCols, NbColsAtOnce); Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols); Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols); for(Index k=0; k<rhsCols; k+=NbColsAtOnce) { Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce); tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols); tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols)); dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView(); } } // Overload for vector as rhs template<typename Decomposition, typename Rhs, typename Dest> typename enable_if<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>::type solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest) { typedef typename Dest::Scalar DestScalar; Index size = rhs.rows(); Eigen::Matrix<DestScalar,Dynamic,1> rhs_dense(rhs); Eigen::Matrix<DestScalar,Dynamic,1> dest_dense(size); dest_dense = dec.solve(rhs_dense); dest = dest_dense.sparseView(); } } // end namespace internal /** \class SparseSolverBase * \ingroup SparseCore_Module * \brief A base class for sparse solvers * * \tparam Derived the actual type of the solver. * */ template<typename Derived> class SparseSolverBase : internal::noncopyable { public: /** Default constructor */ SparseSolverBase() : m_isInitialized(false) {} ~SparseSolverBase() {} Derived& derived() { return *static_cast<Derived*>(this); } const Derived& derived() const { return *static_cast<const Derived*>(this); } /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A. * * \sa compute() */ template<typename Rhs> inline const Solve<Derived, Rhs> solve(const MatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "Solver is not initialized."); eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); return Solve<Derived, Rhs>(derived(), b.derived()); } /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A. 
* * \sa compute() */ template<typename Rhs> inline const Solve<Derived, Rhs> solve(const SparseMatrixBase<Rhs>& b) const { eigen_assert(m_isInitialized && "Solver is not initialized."); eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b"); return Solve<Derived, Rhs>(derived(), b.derived()); } #ifndef EIGEN_PARSED_BY_DOXYGEN /** \internal default implementation of solving with a sparse rhs */ template<typename Rhs,typename Dest> void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const { internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived()); } #endif // EIGEN_PARSED_BY_DOXYGEN protected: mutable bool m_isInitialized; }; } // end namespace Eigen #endif // EIGEN_SPARSESOLVERBASE_H
4,424
34.4
116
h
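SparseSolverBase above is what lets a sparse solver accept a sparse right-hand side: solve_sparse_through_dense_panels() solves at most four columns at a time through dense temporaries and sparsifies the result. The sketch below exercises that path through SparseLU; the choice of solver, the matrix values, and the extra <Eigen/SparseLU> include are illustrative assumptions, and any solver deriving from SparseSolverBase should behave the same way.

#include <Eigen/SparseCore>
#include <Eigen/SparseLU>
#include <iostream>

int main() {
  // Small invertible system A * X = B with a sparse right-hand side.
  Eigen::SparseMatrix<double> A(3, 3), B(3, 2);
  A.insert(0, 0) = 4.0;  A.insert(0, 2) = 1.0;
  A.insert(1, 1) = 2.0;
  A.insert(2, 2) = 1.0;
  A.makeCompressed();
  B.insert(0, 0) = 1.0;  B.insert(2, 1) = 3.0;

  Eigen::SparseLU<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);
  if (solver.info() != Eigen::Success) return 1;

  // Sparse rhs in, sparse result out: handled per dense panel by SparseSolverBase.
  Eigen::SparseMatrix<double> X = solver.solve(B);
  std::cout << X.toDense() << "\n";
  return 0;
}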
abess
abess-master/python/include/Eigen/src/SparseCore/SparseTranspose.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2015 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSETRANSPOSE_H #define EIGEN_SPARSETRANSPOSE_H namespace Eigen { namespace internal { template<typename MatrixType,int CompressedAccess=int(MatrixType::Flags&CompressedAccessBit)> class SparseTransposeImpl : public SparseMatrixBase<Transpose<MatrixType> > {}; template<typename MatrixType> class SparseTransposeImpl<MatrixType,CompressedAccessBit> : public SparseCompressedBase<Transpose<MatrixType> > { typedef SparseCompressedBase<Transpose<MatrixType> > Base; public: using Base::derived; typedef typename Base::Scalar Scalar; typedef typename Base::StorageIndex StorageIndex; inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); } inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); } inline const StorageIndex* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); } inline const StorageIndex* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); } inline const StorageIndex* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); } inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); } inline StorageIndex* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); } inline StorageIndex* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); } inline StorageIndex* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); } }; } template<typename MatrixType> class TransposeImpl<MatrixType,Sparse> : public internal::SparseTransposeImpl<MatrixType> { protected: typedef internal::SparseTransposeImpl<MatrixType> Base; }; namespace internal { template<typename ArgType> struct unary_evaluator<Transpose<ArgType>, IteratorBased> : public evaluator_base<Transpose<ArgType> > { typedef typename evaluator<ArgType>::InnerIterator EvalIterator; public: typedef Transpose<ArgType> XprType; inline Index nonZerosEstimate() const { return m_argImpl.nonZerosEstimate(); } class InnerIterator : public EvalIterator { public: EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer) : EvalIterator(unaryOp.m_argImpl,outer) {} Index row() const { return EvalIterator::col(); } Index col() const { return EvalIterator::row(); } }; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = XprType::Flags }; explicit unary_evaluator(const XprType& op) :m_argImpl(op.nestedExpression()) {} protected: evaluator<ArgType> m_argImpl; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPARSETRANSPOSE_H
3,175
33.150538
113
h
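For compressed inputs the transpose expression in SparseTranspose.h simply re-exposes the nested matrix's valuePtr()/innerIndexPtr()/outerIndexPtr(), so nothing is copied until the expression is assigned. A tiny usage sketch (sizes and values are arbitrary):

#include <Eigen/SparseCore>
#include <iostream>

int main() {
  Eigen::SparseMatrix<double> A(2, 3);
  A.insert(0, 2) = 7.0;
  A.insert(1, 0) = -1.0;

  Eigen::SparseMatrix<double> At = A.transpose();  // evaluated into a 3x2 matrix on assignment
  std::cout << At.rows() << "x" << At.cols()
            << "  At(2,0) = " << At.coeff(2, 0) << "\n";  // 3x2  At(2,0) = 7
  return 0;
}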
abess
abess-master/python/include/Eigen/src/SparseCore/SparseTriangularView.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009-2015 Gael Guennebaud <[email protected]> // Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSE_TRIANGULARVIEW_H #define EIGEN_SPARSE_TRIANGULARVIEW_H namespace Eigen { /** \ingroup SparseCore_Module * * \brief Base class for a triangular part in a \b sparse matrix * * This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated. * It extends class TriangularView with additional methods which are available for sparse expressions only. * * \sa class TriangularView, SparseMatrixBase::triangularView() */ template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<MatrixType,Mode,Sparse> : public SparseMatrixBase<TriangularView<MatrixType,Mode> > { enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit)) || ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)), SkipLast = !SkipFirst, SkipDiag = (Mode&ZeroDiag) ? 1 : 0, HasUnitDiag = (Mode&UnitDiag) ? 1 : 0 }; typedef TriangularView<MatrixType,Mode> TriangularViewType; protected: // dummy solve function to make TriangularView happy. void solve() const; typedef SparseMatrixBase<TriangularViewType> Base; public: EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType) typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef; typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned; template<typename RhsType, typename DstType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const { if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs))) dst = rhs; this->solveInPlace(dst); } /** Applies the inverse of \c *this to the dense vector or matrix \a other, "in-place" */ template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const; /** Applies the inverse of \c *this to the sparse vector or matrix \a other, "in-place" */ template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const; }; namespace internal { template<typename ArgType, unsigned int Mode> struct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased> : evaluator_base<TriangularView<ArgType,Mode> > { typedef TriangularView<ArgType,Mode> XprType; protected: typedef typename XprType::Scalar Scalar; typedef typename XprType::StorageIndex StorageIndex; typedef typename evaluator<ArgType>::InnerIterator EvalIterator; enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit)) || ((Mode&Upper) && (ArgType::Flags&RowMajorBit)), SkipLast = !SkipFirst, SkipDiag = (Mode&ZeroDiag) ? 1 : 0, HasUnitDiag = (Mode&UnitDiag) ? 
1 : 0 }; public: enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = XprType::Flags }; explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {} inline Index nonZerosEstimate() const { return m_argImpl.nonZerosEstimate(); } class InnerIterator : public EvalIterator { typedef EvalIterator Base; public: EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer) : Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize()) { if(SkipFirst) { while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer)) Base::operator++(); if(HasUnitDiag) m_returnOne = m_containsDiag; } else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer())) { if((!SkipFirst) && Base::operator bool()) Base::operator++(); m_returnOne = m_containsDiag; } } EIGEN_STRONG_INLINE InnerIterator& operator++() { if(HasUnitDiag && m_returnOne) m_returnOne = false; else { Base::operator++(); if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer())) { if((!SkipFirst) && Base::operator bool()) Base::operator++(); m_returnOne = m_containsDiag; } } return *this; } EIGEN_STRONG_INLINE operator bool() const { if(HasUnitDiag && m_returnOne) return true; if(SkipFirst) return Base::operator bool(); else { if (SkipDiag) return (Base::operator bool() && this->index() < this->outer()); else return (Base::operator bool() && this->index() <= this->outer()); } } // inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); } // inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); } inline StorageIndex index() const { if(HasUnitDiag && m_returnOne) return internal::convert_index<StorageIndex>(Base::outer()); else return Base::index(); } inline Scalar value() const { if(HasUnitDiag && m_returnOne) return Scalar(1); else return Base::value(); } protected: bool m_returnOne; bool m_containsDiag; private: Scalar& valueRef(); }; protected: evaluator<ArgType> m_argImpl; const ArgType& m_arg; }; } // end namespace internal template<typename Derived> template<int Mode> inline const TriangularView<const Derived, Mode> SparseMatrixBase<Derived>::triangularView() const { return TriangularView<const Derived, Mode>(derived()); } } // end namespace Eigen #endif // EIGEN_SPARSE_TRIANGULARVIEW_H
6,435
32.873684
128
h
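The sparse TriangularViewImpl above declares solveInPlace() for dense and sparse right-hand sides, with UnitDiag/ZeroDiag handled inside the iterator. A usage sketch of the in-place dense solve declared there; the 3x3 values are made up and chosen so the exact solution is (1, 1, 1).

#include <Eigen/SparseCore>
#include <Eigen/Dense>
#include <iostream>

int main() {
  // Sparse lower-triangular matrix with explicit diagonal entries.
  Eigen::SparseMatrix<double> L(3, 3);
  L.insert(0, 0) = 2.0;
  L.insert(1, 0) = 1.0;  L.insert(1, 1) = 1.0;
  L.insert(2, 1) = 3.0;  L.insert(2, 2) = 4.0;

  Eigen::VectorXd x(3);
  x << 2.0, 2.0, 7.0;                                // right-hand side b

  L.triangularView<Eigen::Lower>().solveInPlace(x);  // x now holds L^{-1} b
  std::cout << x.transpose() << "\n";                // 1 1 1
  return 0;
}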
abess
abess-master/python/include/Eigen/src/SparseCore/SparseVector.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2015 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEVECTOR_H #define EIGEN_SPARSEVECTOR_H namespace Eigen { /** \ingroup SparseCore_Module * \class SparseVector * * \brief a sparse vector class * * \tparam _Scalar the scalar type, i.e. the type of the coefficients * * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. * * This class can be extended with the help of the plugin mechanism described on the page * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN. */ namespace internal { template<typename _Scalar, int _Options, typename _StorageIndex> struct traits<SparseVector<_Scalar, _Options, _StorageIndex> > { typedef _Scalar Scalar; typedef _StorageIndex StorageIndex; typedef Sparse StorageKind; typedef MatrixXpr XprKind; enum { IsColVector = (_Options & RowMajorBit) ? 0 : 1, RowsAtCompileTime = IsColVector ? Dynamic : 1, ColsAtCompileTime = IsColVector ? 1 : Dynamic, MaxRowsAtCompileTime = RowsAtCompileTime, MaxColsAtCompileTime = ColsAtCompileTime, Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit, SupportedAccessPatterns = InnerRandomAccessPattern }; }; // Sparse-Vector-Assignment kinds: enum { SVA_RuntimeSwitch, SVA_Inner, SVA_Outer }; template< typename Dest, typename Src, int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch : Src::InnerSizeAtCompileTime==1 ? SVA_Outer : SVA_Inner> struct sparse_vector_assign_selector; } template<typename _Scalar, int _Options, typename _StorageIndex> class SparseVector : public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> > { typedef SparseCompressedBase<SparseVector> Base; using Base::convert_index; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) typedef internal::CompressedStorage<Scalar,StorageIndex> Storage; enum { IsColVector = internal::traits<SparseVector>::IsColVector }; enum { Options = _Options }; EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; } EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; } EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } EIGEN_STRONG_INLINE Index outerSize() const { return 1; } EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); } EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); } EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); } EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); } inline const StorageIndex* outerIndexPtr() const { return 0; } inline StorageIndex* outerIndexPtr() { return 0; } inline const StorageIndex* innerNonZeroPtr() const { return 0; } inline StorageIndex* innerNonZeroPtr() { return 0; } /** \internal */ inline Storage& data() { return m_data; } /** \internal */ inline const Storage& data() const { return m_data; } inline Scalar coeff(Index row, Index col) const { eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size)); return coeff(IsColVector ? 
row : col); } inline Scalar coeff(Index i) const { eigen_assert(i>=0 && i<m_size); return m_data.at(StorageIndex(i)); } inline Scalar& coeffRef(Index row, Index col) { eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size)); return coeffRef(IsColVector ? row : col); } /** \returns a reference to the coefficient value at given index \a i * This operation involes a log(rho*size) binary search. If the coefficient does not * exist yet, then a sorted insertion into a sequential buffer is performed. * * This insertion might be very costly if the number of nonzeros above \a i is large. */ inline Scalar& coeffRef(Index i) { eigen_assert(i>=0 && i<m_size); return m_data.atWithInsertion(StorageIndex(i)); } public: typedef typename Base::InnerIterator InnerIterator; typedef typename Base::ReverseInnerIterator ReverseInnerIterator; inline void setZero() { m_data.clear(); } /** \returns the number of non zero coefficients */ inline Index nonZeros() const { return m_data.size(); } inline void startVec(Index outer) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); } inline Scalar& insertBackByOuterInner(Index outer, Index inner) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); return insertBack(inner); } inline Scalar& insertBack(Index i) { m_data.append(0, i); return m_data.value(m_data.size()-1); } Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner) { EIGEN_UNUSED_VARIABLE(outer); eigen_assert(outer==0); return insertBackUnordered(inner); } inline Scalar& insertBackUnordered(Index i) { m_data.append(0, i); return m_data.value(m_data.size()-1); } inline Scalar& insert(Index row, Index col) { eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size)); Index inner = IsColVector ? row : col; Index outer = IsColVector ? col : row; EIGEN_ONLY_USED_FOR_DEBUG(outer); eigen_assert(outer==0); return insert(inner); } Scalar& insert(Index i) { eigen_assert(i>=0 && i<m_size); Index startId = 0; Index p = Index(m_data.size()) - 1; // TODO smart realloc m_data.resize(p+2,1); while ( (p >= startId) && (m_data.index(p) > i) ) { m_data.index(p+1) = m_data.index(p); m_data.value(p+1) = m_data.value(p); --p; } m_data.index(p+1) = convert_index(i); m_data.value(p+1) = 0; return m_data.value(p+1); } /** */ inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); } inline void finalize() {} /** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) { m_data.prune(reference,epsilon); } /** Resizes the sparse vector to \a rows x \a cols * * This method is provided for compatibility with matrices. * For a column vector, \a cols must be equal to 1. * For a row vector, \a rows must be equal to 1. * * \sa resize(Index) */ void resize(Index rows, Index cols) { eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1"); resize(IsColVector ? rows : cols); } /** Resizes the sparse vector to \a newSize * This method deletes all entries, thus leaving an empty sparse vector * * \sa conservativeResize(), setZero() */ void resize(Index newSize) { m_size = newSize; m_data.clear(); } /** Resizes the sparse vector to \a newSize, while leaving old values untouched. * * If the size of the vector is decreased, then the storage of the out-of bounds coefficients is kept and reserved. * Call .data().squeeze() to free extra memory. 
* * \sa reserve(), setZero() */ void conservativeResize(Index newSize) { if (newSize < m_size) { Index i = 0; while (i<m_data.size() && m_data.index(i)<newSize) ++i; m_data.resize(i); } m_size = newSize; } void resizeNonZeros(Index size) { m_data.resize(size); } inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); } explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); } inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); } template<typename OtherDerived> inline SparseVector(const SparseMatrixBase<OtherDerived>& other) : m_size(0) { #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN #endif check_template_parameters(); *this = other.derived(); } inline SparseVector(const SparseVector& other) : Base(other), m_size(0) { check_template_parameters(); *this = other.derived(); } /** Swaps the values of \c *this and \a other. * Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only. * \sa SparseMatrixBase::swap() */ inline void swap(SparseVector& other) { std::swap(m_size, other.m_size); m_data.swap(other.m_data); } template<int OtherOptions> inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other) { eigen_assert(other.outerSize()==1); std::swap(m_size, other.m_innerSize); m_data.swap(other.m_data); } inline SparseVector& operator=(const SparseVector& other) { if (other.isRValue()) { swap(other.const_cast_derived()); } else { resize(other.size()); m_data = other.m_data; } return *this; } template<typename OtherDerived> inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other) { SparseVector tmp(other.size()); internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived()); this->swap(tmp); return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN template<typename Lhs, typename Rhs> inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product) { return Base::operator=(product); } #endif friend std::ostream & operator << (std::ostream & s, const SparseVector& m) { for (Index i=0; i<m.nonZeros(); ++i) s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") "; s << std::endl; return s; } /** Destructor */ inline ~SparseVector() {} /** Overloaded for performance */ Scalar sum() const; public: /** \internal \deprecated use setZero() and reserve() */ EIGEN_DEPRECATED void startFill(Index reserve) { setZero(); m_data.reserve(reserve); } /** \internal \deprecated use insertBack(Index,Index) */ EIGEN_DEPRECATED Scalar& fill(Index r, Index c) { eigen_assert(r==0 || c==0); return fill(IsColVector ? r : c); } /** \internal \deprecated use insertBack(Index) */ EIGEN_DEPRECATED Scalar& fill(Index i) { m_data.append(0, i); return m_data.value(m_data.size()-1); } /** \internal \deprecated use insert(Index,Index) */ EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c) { eigen_assert(r==0 || c==0); return fillrand(IsColVector ? r : c); } /** \internal \deprecated use insert(Index) */ EIGEN_DEPRECATED Scalar& fillrand(Index i) { return insert(i); } /** \internal \deprecated use finalize() */ EIGEN_DEPRECATED void endFill() {} // These two functions were here in the 3.1 release, so let's keep them in case some code rely on them. 
/** \internal \deprecated use data() */ EIGEN_DEPRECATED Storage& _data() { return m_data; } /** \internal \deprecated use data() */ EIGEN_DEPRECATED const Storage& _data() const { return m_data; } # ifdef EIGEN_SPARSEVECTOR_PLUGIN # include EIGEN_SPARSEVECTOR_PLUGIN # endif protected: static void check_template_parameters() { EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } Storage m_data; Index m_size; }; namespace internal { template<typename _Scalar, int _Options, typename _Index> struct evaluator<SparseVector<_Scalar,_Options,_Index> > : evaluator_base<SparseVector<_Scalar,_Options,_Index> > { typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType; typedef evaluator_base<SparseVectorType> Base; typedef typename SparseVectorType::InnerIterator InnerIterator; typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator; enum { CoeffReadCost = NumTraits<_Scalar>::ReadCost, Flags = SparseVectorType::Flags }; evaluator() : Base() {} explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } inline Index nonZerosEstimate() const { return m_matrix->nonZeros(); } operator SparseVectorType&() { return m_matrix->const_cast_derived(); } operator const SparseVectorType&() const { return *m_matrix; } const SparseVectorType *m_matrix; }; template< typename Dest, typename Src> struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> { static void run(Dest& dst, const Src& src) { eigen_internal_assert(src.innerSize()==src.size()); typedef internal::evaluator<Src> SrcEvaluatorType; SrcEvaluatorType srcEval(src); for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it) dst.insert(it.index()) = it.value(); } }; template< typename Dest, typename Src> struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> { static void run(Dest& dst, const Src& src) { eigen_internal_assert(src.outerSize()==src.size()); typedef internal::evaluator<Src> SrcEvaluatorType; SrcEvaluatorType srcEval(src); for(Index i=0; i<src.size(); ++i) { typename SrcEvaluatorType::InnerIterator it(srcEval, i); if(it) dst.insert(i) = it.value(); } } }; template< typename Dest, typename Src> struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> { static void run(Dest& dst, const Src& src) { if(src.outerSize()==1) sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src); else sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src); } }; } } // end namespace Eigen #endif // EIGEN_SPARSEVECTOR_H
14,831
29.964509
120
h
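SparseVector stores (value, index) pairs in the CompressedStorage shown earlier; insert() and coeffRef() perform sorted insertion, so filling in increasing index order is the cheap path, as the coeffRef() comment warns. A short usage sketch (size and values are arbitrary):

#include <Eigen/SparseCore>
#include <iostream>

int main() {
  Eigen::SparseVector<double> v(8);
  v.reserve(3);
  v.insert(1) = 2.0;       // sorted insertion into the (value, index) storage
  v.insert(5) = -1.0;
  v.coeffRef(5) += 0.5;    // binary search; the coefficient already exists

  std::cout << "nonzeros: " << v.nonZeros() << "\n";
  for (Eigen::SparseVector<double>::InnerIterator it(v); it; ++it)
    std::cout << "  v(" << it.index() << ") = " << it.value() << "\n";
  return 0;
}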
abess
abess-master/python/include/Eigen/src/SparseCore/SparseView.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2011-2014 Gael Guennebaud <[email protected]> // Copyright (C) 2010 Daniel Lowengrub <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEVIEW_H #define EIGEN_SPARSEVIEW_H namespace Eigen { namespace internal { template<typename MatrixType> struct traits<SparseView<MatrixType> > : traits<MatrixType> { typedef typename MatrixType::StorageIndex StorageIndex; typedef Sparse StorageKind; enum { Flags = int(traits<MatrixType>::Flags) & (RowMajorBit) }; }; } // end namespace internal /** \ingroup SparseCore_Module * \class SparseView * * \brief Expression of a dense or sparse matrix with zero or too small values removed * * \tparam MatrixType the type of the object of which we are removing the small entries * * This class represents an expression of a given dense or sparse matrix with * entries smaller than \c reference * \c epsilon are removed. * It is the return type of MatrixBase::sparseView() and SparseMatrixBase::pruned() * and most of the time this is the only way it is used. * * \sa MatrixBase::sparseView(), SparseMatrixBase::pruned() */ template<typename MatrixType> class SparseView : public SparseMatrixBase<SparseView<MatrixType> > { typedef typename MatrixType::Nested MatrixTypeNested; typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested; typedef SparseMatrixBase<SparseView > Base; public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView) typedef typename internal::remove_all<MatrixType>::type NestedExpression; explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0), const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision()) : m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {} inline Index rows() const { return m_matrix.rows(); } inline Index cols() const { return m_matrix.cols(); } inline Index innerSize() const { return m_matrix.innerSize(); } inline Index outerSize() const { return m_matrix.outerSize(); } /** \returns the nested expression */ const typename internal::remove_all<MatrixTypeNested>::type& nestedExpression() const { return m_matrix; } Scalar reference() const { return m_reference; } RealScalar epsilon() const { return m_epsilon; } protected: MatrixTypeNested m_matrix; Scalar m_reference; RealScalar m_epsilon; }; namespace internal { // TODO find a way to unify the two following variants // This is tricky because implementing an inner iterator on top of an IndexBased evaluator is // not easy because the evaluators do not expose the sizes of the underlying expression. 
template<typename ArgType> struct unary_evaluator<SparseView<ArgType>, IteratorBased> : public evaluator_base<SparseView<ArgType> > { typedef typename evaluator<ArgType>::InnerIterator EvalIterator; public: typedef SparseView<ArgType> XprType; class InnerIterator : public EvalIterator { typedef typename XprType::Scalar Scalar; public: EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer) : EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view) { incrementToNonZero(); } EIGEN_STRONG_INLINE InnerIterator& operator++() { EvalIterator::operator++(); incrementToNonZero(); return *this; } using EvalIterator::value; protected: const XprType &m_view; private: void incrementToNonZero() { while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon())) { EvalIterator::operator++(); } } }; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = XprType::Flags }; explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {} protected: evaluator<ArgType> m_argImpl; const XprType &m_view; }; template<typename ArgType> struct unary_evaluator<SparseView<ArgType>, IndexBased> : public evaluator_base<SparseView<ArgType> > { public: typedef SparseView<ArgType> XprType; protected: enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit }; typedef typename XprType::Scalar Scalar; typedef typename XprType::StorageIndex StorageIndex; public: class InnerIterator { public: EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer) : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize()) { incrementToNonZero(); } EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; incrementToNonZero(); return *this; } EIGEN_STRONG_INLINE Scalar value() const { return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner) : m_sve.m_argImpl.coeff(m_inner, m_outer); } EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; } inline Index row() const { return IsRowMajor ? m_outer : index(); } inline Index col() const { return IsRowMajor ? index() : m_outer; } EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } protected: const unary_evaluator &m_sve; Index m_inner; const Index m_outer; const Index m_end; private: void incrementToNonZero() { while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon())) { m_inner++; } } }; enum { CoeffReadCost = evaluator<ArgType>::CoeffReadCost, Flags = XprType::Flags }; explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {} protected: evaluator<ArgType> m_argImpl; const XprType &m_view; }; } // end namespace internal /** \ingroup SparseCore_Module * * \returns a sparse expression of the dense expression \c *this with values smaller than * \a reference * \a epsilon removed. * * This method is typically used when prototyping to convert a quickly assembled dense Matrix \c D to a SparseMatrix \c S: * \code * MatrixXd D(n,m); * SparseMatrix<double> S; * S = D.sparseView(); // suppress numerical zeros (exact) * S = D.sparseView(reference); * S = D.sparseView(reference,epsilon); * \endcode * where \a reference is a meaningful non zero reference value, * and \a epsilon is a tolerance factor defaulting to NumTraits<Scalar>::dummy_precision(). 
* * \sa SparseMatrixBase::pruned(), class SparseView */ template<typename Derived> const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference, const typename NumTraits<Scalar>::Real& epsilon) const { return SparseView<Derived>(derived(), reference, epsilon); } /** \returns an expression of \c *this with values smaller than * \a reference * \a epsilon removed. * * This method is typically used in conjunction with the product of two sparse matrices * to automatically prune the smallest values as follows: * \code * C = (A*B).pruned(); // suppress numerical zeros (exact) * C = (A*B).pruned(ref); * C = (A*B).pruned(ref,epsilon); * \endcode * where \c ref is a meaningful non zero reference value. * */ template<typename Derived> const SparseView<Derived> SparseMatrixBase<Derived>::pruned(const Scalar& reference, const RealScalar& epsilon) const { return SparseView<Derived>(derived(), reference, epsilon); } } // end namespace Eigen #endif
8,110
30.933071
123
h
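The sparseView()/pruned() documentation inside SparseView.h already sketches the intended calls; below is a compilable version of the same idea, with a reference value and epsilon chosen purely for illustration.

#include <Eigen/SparseCore>
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd D(2, 2);
  D << 1.0, 1e-14,
       0.0, 2.0;

  // Drop entries that are much smaller than reference * epsilon (here 1.0 * 1e-9).
  Eigen::SparseMatrix<double> S = D.sparseView(1.0, 1e-9);
  std::cout << "stored entries: " << S.nonZeros() << "\n";   // 2

  // pruned() plays the same role for sparse expressions, e.g. after a product.
  Eigen::SparseMatrix<double> C = (S * S).pruned();
  std::cout << "product entries: " << C.nonZeros() << "\n";  // 2
  return 0;
}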
abess
abess-master/python/include/Eigen/src/SparseLU/SparseLU_Structs.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file comes from a partly modified version of files slu_[s,d,c,z]defs.h * -- SuperLU routine (version 4.1) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * November, 2010 * * Global data structures used in LU factorization - * * nsuper: #supernodes = nsuper + 1, numbered [0, nsuper]. * (xsup,supno): supno[i] is the supernode no to which i belongs; * xsup(s) points to the beginning of the s-th supernode. * e.g. supno 0 1 2 2 3 3 3 4 4 4 4 4 (n=12) * xsup 0 1 2 4 7 12 * Note: dfs will be performed on supernode rep. relative to the new * row pivoting ordering * * (xlsub,lsub): lsub[*] contains the compressed subscript of * rectangular supernodes; xlsub[j] points to the starting * location of the j-th column in lsub[*]. Note that xlsub * is indexed by column. * Storage: original row subscripts * * During the course of sparse LU factorization, we also use * (xlsub,lsub) for the purpose of symmetric pruning. For each * supernode {s,s+1,...,t=s+r} with first column s and last * column t, the subscript set * lsub[j], j=xlsub[s], .., xlsub[s+1]-1 * is the structure of column s (i.e. structure of this supernode). * It is used for the storage of numerical values. * Furthermore, * lsub[j], j=xlsub[t], .., xlsub[t+1]-1 * is the structure of the last column t of this supernode. * It is for the purpose of symmetric pruning. Therefore, the * structural subscripts can be rearranged without making physical * interchanges among the numerical values. * * However, if the supernode has only one column, then we * only keep one set of subscripts. For any subscript interchange * performed, similar interchange must be done on the numerical * values. * * The last column structures (for pruning) will be removed * after the numercial LU factorization phase. * * (xlusup,lusup): lusup[*] contains the numerical values of the * rectangular supernodes; xlusup[j] points to the starting * location of the j-th column in storage vector lusup[*] * Note: xlusup is indexed by column. * Each rectangular supernode is stored by column-major * scheme, consistent with Fortran 2-dim array storage. * * (xusub,ucol,usub): ucol[*] stores the numerical values of * U-columns outside the rectangular supernodes. The row * subscript of nonzero ucol[k] is stored in usub[k]. * xusub[i] points to the starting location of column i in ucol. * Storage: new row subscripts; that is subscripts of PA. */ #ifndef EIGEN_LU_STRUCTS #define EIGEN_LU_STRUCTS namespace Eigen { namespace internal { typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType; template <typename IndexVector, typename ScalarVector> struct LU_GlobalLU_t { typedef typename IndexVector::Scalar StorageIndex; IndexVector xsup; //First supernode column ... xsup(s) points to the beginning of the s-th supernode IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping) ScalarVector lusup; // nonzero values of L ordered by columns IndexVector lsub; // Compressed row indices of L rectangular supernodes. 
IndexVector xlusup; // pointers to the beginning of each column in lusup IndexVector xlsub; // pointers to the beginning of each column in lsub Index nzlmax; // Current max size of lsub Index nzlumax; // Current max size of lusup ScalarVector ucol; // nonzero values of U ordered by columns IndexVector usub; // row indices of U columns in ucol IndexVector xusub; // Pointers to the beginning of each column of U in ucol Index nzumax; // Current max size of ucol Index n; // Number of columns in the matrix Index num_expansions; }; // Values to set for performance struct perfvalues { Index panel_size; // a panel consists of at most <panel_size> consecutive columns Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns) // in a subtree of the elimination tree is less than relax, this subtree is considered // as one supernode regardless of the row structures of those columns Index maxsuper; // The maximum size for a supernode in complete LU Index rowblk; // The minimum row dimension for 2-D blocking to be used; Index colblk; // The minimum column dimension for 2-D blocking to be used; Index fillfactor; // The estimated fills factors for L and U, compared with A }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_LU_STRUCTS
4,972
43.801802
103
h
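The long comment in SparseLU_Structs.h gives a concrete column-to-supernode mapping for n = 12: supno = 0 1 2 2 3 3 3 4 4 4 4 4 and xsup = 0 1 2 4 7 12, i.e. xsup[s] is the first column of supernode s and xsup[s+1] is one past its last. The tiny check below (plain C++, independent of the Eigen internals) verifies that invariant on the example from the comment.

#include <cassert>
#include <vector>

int main() {
  // Example from the header comment: n = 12 columns grouped into supernodes 0..4.
  const std::vector<int> supno = {0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4};
  const std::vector<int> xsup  = {0, 1, 2, 4, 7, 12};

  for (int col = 0; col < static_cast<int>(supno.size()); ++col) {
    const int s = supno[col];
    // Each column must fall inside the half-open column range of its supernode.
    assert(xsup[s] <= col && col < xsup[s + 1]);
  }
  return 0;
}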
abess
abess-master/python/include/Eigen/src/SparseLU/SparseLU_Utils.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSELU_UTILS_H #define EIGEN_SPARSELU_UTILS_H namespace Eigen { namespace internal { /** * \brief Count Nonzero elements in the factors */ template <typename Scalar, typename StorageIndex> void SparseLUImpl<Scalar,StorageIndex>::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu) { nnzL = 0; nnzU = (glu.xusub)(n); Index nsuper = (glu.supno)(n); Index jlen; Index i, j, fsupc; if (n <= 0 ) return; // For each supernode for (i = 0; i <= nsuper; i++) { fsupc = glu.xsup(i); jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); for (j = fsupc; j < glu.xsup(i+1); j++) { nnzL += jlen; nnzU += j - fsupc + 1; jlen--; } } } /** * \brief Fix up the data storage lsub for L-subscripts. * * It removes the subscripts sets for structural pruning, * and applies permutation to the remaining subscripts * */ template <typename Scalar, typename StorageIndex> void SparseLUImpl<Scalar,StorageIndex>::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu) { Index fsupc, i, j, k, jstart; StorageIndex nextl = 0; Index nsuper = (glu.supno)(n); // For each supernode for (i = 0; i <= nsuper; i++) { fsupc = glu.xsup(i); jstart = glu.xlsub(fsupc); glu.xlsub(fsupc) = nextl; for (j = jstart; j < glu.xlsub(fsupc + 1); j++) { glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A nextl++; } for (k = fsupc+1; k < glu.xsup(i+1); k++) glu.xlsub(k) = nextl; // other columns in supernode i } glu.xlsub(n) = nextl; } } // end namespace internal } // end namespace Eigen #endif // EIGEN_SPARSELU_UTILS_H
2,047
24.283951
105
h
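countnz() in SparseLU_Utils.h accumulates, per supernode, a trapezoid of L entries (jlen, jlen-1, ...) and a triangle of in-supernode U entries (1, 2, ..., nsupc) on top of the off-supernode count xusub(n). The arithmetic sketch below reproduces just that per-supernode loop with made-up sizes, outside of the GlobalLU_t structures.

#include <iostream>

// Per-supernode contribution mirroring the countnz() loop: the L block is a
// trapezoid (jlen shrinks by one per column), the in-supernode U part a triangle.
static void supernodeCount(long jlen0, long nsupc, long& nnzL, long& nnzU) {
  long jlen = jlen0;
  for (long j = 0; j < nsupc; ++j) {
    nnzL += jlen--;
    nnzU += j + 1;
  }
}

int main() {
  long nnzL = 0, nnzU = 0;       // in SparseLU itself, nnzU would start from xusub(n)
  supernodeCount(/*jlen0=*/5, /*nsupc=*/3, nnzL, nnzU);
  std::cout << nnzL << " " << nnzU << "\n";  // 5+4+3 = 12 and 1+2+3 = 6
  return 0;
}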
abess
abess-master/python/include/Eigen/src/SparseLU/SparseLU_column_bmod.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Désiré Nuentsa-Wakam <[email protected]> // Copyright (C) 2012 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /* * NOTE: This file is the modified version of xcolumn_bmod.c file in SuperLU * -- SuperLU routine (version 3.0) -- * Univ. of California Berkeley, Xerox Palo Alto Research Center, * and Lawrence Berkeley National Lab. * October 15, 2003 * * Copyright (c) 1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program for any * purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is * granted, provided the above notices are retained, and a notice that * the code was modified is included with the above copyright notice. */ #ifndef SPARSELU_COLUMN_BMOD_H #define SPARSELU_COLUMN_BMOD_H namespace Eigen { namespace internal { /** * \brief Performs numeric block updates (sup-col) in topological order * * \param jcol current column to update * \param nseg Number of segments in the U part * \param dense Store the full representation of the column * \param tempv working array * \param segrep segment representative ... * \param repfnz ??? First nonzero column in each row ??? ... * \param fpanelc First column in the current panel * \param glu Global LU data. * \return 0 - successful return * > 0 - number of bytes allocated when run out of space * */ template <typename Scalar, typename StorageIndex> Index SparseLUImpl<Scalar,StorageIndex>::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu) { Index jsupno, k, ksub, krep, ksupno; Index lptr, nrow, isub, irow, nextlu, new_next, ufirst; Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros; /* krep = representative of current k-th supernode * fsupc = first supernodal column * nsupc = number of columns in a supernode * nsupr = number of rows in a supernode * luptr = location of supernodal LU-block in storage * kfnz = first nonz in the k-th supernodal segment * no_zeros = no lf leading zeros in a supernodal U-segment */ jsupno = glu.supno(jcol); // For each nonzero supernode segment of U[*,j] in topological order k = nseg - 1; Index d_fsupc; // distance between the first column of the current panel and the // first column of the current snode Index fst_col; // First column within small LU update Index segsize; for (ksub = 0; ksub < nseg; ksub++) { krep = segrep(k); k--; ksupno = glu.supno(krep); if (jsupno != ksupno ) { // outside the rectangular supernode fsupc = glu.xsup(ksupno); fst_col = (std::max)(fsupc, fpanelc); // Distance from the current supernode to the current panel; // d_fsupc = 0 if fsupc > fpanelc d_fsupc = fst_col - fsupc; luptr = glu.xlusup(fst_col) + d_fsupc; lptr = glu.xlsub(fsupc) + d_fsupc; kfnz = repfnz(krep); kfnz = (std::max)(kfnz, fpanelc); segsize = krep - kfnz + 1; nsupc = krep - fst_col + 1; nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); nrow = nsupr - d_fsupc - nsupc; Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col); // Perform a triangular solver and block 
update, // then scatter the result of sup-col update to dense no_zeros = kfnz - fst_col; if(segsize==1) LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); else LU_kernel_bmod<Dynamic>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); } // end if jsupno } // end for each segment // Process the supernodal portion of L\U[*,j] nextlu = glu.xlusup(jcol); fsupc = glu.xsup(jsupno); // copy the SPA dense into L\U[*,j] Index mem; new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc); Index offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next; if(offset) new_next += offset; while (new_next > glu.nzlumax ) { mem = memXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions); if (mem) return mem; } for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++) { irow = glu.lsub(isub); glu.lusup(nextlu) = dense(irow); dense(irow) = Scalar(0.0); ++nextlu; } if(offset) { glu.lusup.segment(nextlu,offset).setZero(); nextlu += offset; } glu.xlusup(jcol + 1) = StorageIndex(nextlu); // close L\U(*,jcol); /* For more updates within the panel (also within the current supernode), * should start from the first column of the panel, or the first column * of the supernode, whichever is bigger. There are two cases: * 1) fsupc < fpanelc, then fst_col <-- fpanelc * 2) fsupc >= fpanelc, then fst_col <-- fsupc */ fst_col = (std::max)(fsupc, fpanelc); if (fst_col < jcol) { // Distance between the current supernode and the current panel // d_fsupc = 0 if fsupc >= fpanelc d_fsupc = fst_col - fsupc; lptr = glu.xlsub(fsupc) + d_fsupc; luptr = glu.xlusup(fst_col) + d_fsupc; nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension nsupc = jcol - fst_col; // excluding jcol nrow = nsupr - d_fsupc - nsupc; // points to the beginning of jcol in snode L\U(jsupno) ufirst = glu.xlusup(jcol) + d_fsupc; Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol); MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) ); VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc); u = A.template triangularView<UnitLower>().solve(u); new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) ); VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow); l.noalias() -= A * u; } // End if fst_col return 0; } } // end namespace internal } // end namespace Eigen #endif // SPARSELU_COLUMN_BMOD_H
6,710
35.873626
134
h
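column_bmod() above is an internal step of the SparseLU factorization and is never called directly; a minimal sketch of the public driver that ends up exercising it might look as follows (the solve_sketch wrapper name is illustrative, and A and b are assumed to be filled elsewhere).

#include <Eigen/SparseCore>
#include <Eigen/SparseLU>

// Hypothetical wrapper around the public SparseLU driver.
void solve_sketch(const Eigen::SparseMatrix<double>& A, const Eigen::VectorXd& b)
{
  Eigen::SparseLU<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > solver;
  solver.analyzePattern(A);              // symbolic step: fill-reducing ordering, elimination tree
  solver.factorize(A);                   // numeric step: supernodal panels updated via column_bmod
  Eigen::VectorXd x = solver.solve(b);   // triangular solves with the computed L and U factors
}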
abess
abess-master/python/include/Eigen/src/SparseLU/SparseLU_gemm_kernel.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2012 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSELU_GEMM_KERNEL_H #define EIGEN_SPARSELU_GEMM_KERNEL_H namespace Eigen { namespace internal { /** \internal * A general matrix-matrix product kernel optimized for the SparseLU factorization. * - A, B, and C must be column major * - lda and ldc must be multiples of the respective packet size * - C must have the same alignment as A */ template<typename Scalar> EIGEN_DONT_INLINE void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc) { using namespace Eigen::internal; typedef typename packet_traits<Scalar>::type Packet; enum { NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, PacketSize = packet_traits<Scalar>::size, PM = 8, // peeling in M RN = 2, // register blocking RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk SM = PM*PacketSize // step along M }; Index d_end = (d/RK)*RK; // number of columns of A (rows of B) suitable for full register blocking Index n_end = (n/RN)*RN; // number of columns of B-C suitable for processing RN columns at once Index i0 = internal::first_default_aligned(A,m); eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m))); // handle the non aligned rows of A and C without any optimization: for(Index i=0; i<i0; ++i) { for(Index j=0; j<n; ++j) { Scalar c = C[i+j*ldc]; for(Index k=0; k<d; ++k) c += B[k+j*ldb] * A[i+k*lda]; C[i+j*ldc] = c; } } // process the remaining rows per chunk of BM rows for(Index ib=i0; ib<m; ib+=BM) { Index actual_b = std::min<Index>(BM, m-ib); // actual number of rows Index actual_b_end1 = (actual_b/SM)*SM; // actual number of rows suitable for peeling Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization // Let's process two columns of B-C at once for(Index j=0; j<n_end; j+=RN) { const Scalar* Bc0 = B+(j+0)*ldb; const Scalar* Bc1 = B+(j+1)*ldb; for(Index k=0; k<d_end; k+=RK) { // load and expand a RN x RK block of B Packet b00, b10, b20, b30, b01, b11, b21, b31; { b00 = pset1<Packet>(Bc0[0]); } { b10 = pset1<Packet>(Bc0[1]); } if(RK==4) { b20 = pset1<Packet>(Bc0[2]); } if(RK==4) { b30 = pset1<Packet>(Bc0[3]); } { b01 = pset1<Packet>(Bc1[0]); } { b11 = pset1<Packet>(Bc1[1]); } if(RK==4) { b21 = pset1<Packet>(Bc1[2]); } if(RK==4) { b31 = pset1<Packet>(Bc1[3]); } Packet a0, a1, a2, a3, c0, c1, t0, t1; const Scalar* A0 = A+ib+(k+0)*lda; const Scalar* A1 = A+ib+(k+1)*lda; const Scalar* A2 = A+ib+(k+2)*lda; const Scalar* A3 = A+ib+(k+3)*lda; Scalar* C0 = C+ib+(j+0)*ldc; Scalar* C1 = C+ib+(j+1)*ldc; a0 = pload<Packet>(A0); a1 = pload<Packet>(A1); if(RK==4) { a2 = pload<Packet>(A2); a3 = pload<Packet>(A3); } else { // workaround "may be used uninitialized in this function" warning a2 = a3 = a0; } #define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);} #define WORK(I) \ c0 = pload<Packet>(C0+i+(I)*PacketSize); \ c1 = pload<Packet>(C1+i+(I)*PacketSize); \ KMADD(c0, a0, b00, t0) \ KMADD(c1, a0, b01, t1) \ a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \ KMADD(c0, a1, b10, t0) \ KMADD(c1, a1, b11, t1) \ a1 = 
pload<Packet>(A1+i+(I+1)*PacketSize); \ if(RK==4){ KMADD(c0, a2, b20, t0) }\ if(RK==4){ KMADD(c1, a2, b21, t1) }\ if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\ if(RK==4){ KMADD(c0, a3, b30, t0) }\ if(RK==4){ KMADD(c1, a3, b31, t1) }\ if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\ pstore(C0+i+(I)*PacketSize, c0); \ pstore(C1+i+(I)*PacketSize, c1) // process rows of A' - C' with aggressive vectorization and peeling for(Index i=0; i<actual_b_end1; i+=PacketSize*8) { EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1"); prefetch((A0+i+(5)*PacketSize)); prefetch((A1+i+(5)*PacketSize)); if(RK==4) prefetch((A2+i+(5)*PacketSize)); if(RK==4) prefetch((A3+i+(5)*PacketSize)); WORK(0); WORK(1); WORK(2); WORK(3); WORK(4); WORK(5); WORK(6); WORK(7); } // process the remaining rows with vectorization only for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize) { WORK(0); } #undef WORK // process the remaining rows without vectorization for(Index i=actual_b_end2; i<actual_b; ++i) { if(RK==4) { C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3]; C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3]; } else { C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]; C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]; } } Bc0 += RK; Bc1 += RK; } // peeled loop on k } // peeled loop on the columns j // process the last column (we now perform a matrix-vector product) if((n-n_end)>0) { const Scalar* Bc0 = B+(n-1)*ldb; for(Index k=0; k<d_end; k+=RK) { // load and expand a 1 x RK block of B Packet b00, b10, b20, b30; b00 = pset1<Packet>(Bc0[0]); b10 = pset1<Packet>(Bc0[1]); if(RK==4) b20 = pset1<Packet>(Bc0[2]); if(RK==4) b30 = pset1<Packet>(Bc0[3]); Packet a0, a1, a2, a3, c0, t0/*, t1*/; const Scalar* A0 = A+ib+(k+0)*lda; const Scalar* A1 = A+ib+(k+1)*lda; const Scalar* A2 = A+ib+(k+2)*lda; const Scalar* A3 = A+ib+(k+3)*lda; Scalar* C0 = C+ib+(n_end)*ldc; a0 = pload<Packet>(A0); a1 = pload<Packet>(A1); if(RK==4) { a2 = pload<Packet>(A2); a3 = pload<Packet>(A3); } else { // workaround "may be used uninitialized in this function" warning a2 = a3 = a0; } #define WORK(I) \ c0 = pload<Packet>(C0+i+(I)*PacketSize); \ KMADD(c0, a0, b00, t0) \ a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \ KMADD(c0, a1, b10, t0) \ a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \ if(RK==4){ KMADD(c0, a2, b20, t0) }\ if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\ if(RK==4){ KMADD(c0, a3, b30, t0) }\ if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\ pstore(C0+i+(I)*PacketSize, c0); // agressive vectorization and peeling for(Index i=0; i<actual_b_end1; i+=PacketSize*8) { EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2"); WORK(0); WORK(1); WORK(2); WORK(3); WORK(4); WORK(5); WORK(6); WORK(7); } // vectorization only for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize) { WORK(0); } // remaining scalars for(Index i=actual_b_end2; i<actual_b; ++i) { if(RK==4) C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3]; else C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]; } Bc0 += RK; #undef WORK } } // process the last columns of A, corresponding to the last rows of B Index rd = d-d_end; if(rd>0) { for(Index j=0; j<n; ++j) { enum { Alignment = PacketSize>1 ? 
Aligned : 0 }; typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector; typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector; if(rd==1) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b); else if(rd==2) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b) + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b); else MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b) + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b) + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b); } } } // blocking on the rows of A and C } #undef KMADD } // namespace internal } // namespace Eigen #endif // EIGEN_SPARSELU_GEMM_KERNEL_H
10,216
35.359431
123
h
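The kernel above performs an in-place C += A*B update on column-major blocks. As a point of reference only (and under the assumption that a strided-Map formulation captures the intent), the same update can be written with dense Eigen types; the real kernel additionally requires lda and ldc to be multiples of the packet size and matching alignment of A and C.

#include <Eigen/Dense>

// Reference formulation of the update computed by sparselu_gemm (sketch only).
void gemm_reference(Eigen::Index m, Eigen::Index n, Eigen::Index d,
                    const double* A, Eigen::Index lda,
                    const double* B, Eigen::Index ldb,
                    double* C, Eigen::Index ldc)
{
  using Eigen::Map; using Eigen::MatrixXd; using Eigen::OuterStride;
  Map<MatrixXd, 0, OuterStride<> >       Cmap(C, m, n, OuterStride<>(ldc));
  Map<const MatrixXd, 0, OuterStride<> > Amap(A, m, d, OuterStride<>(lda));
  Map<const MatrixXd, 0, OuterStride<> > Bmap(B, d, n, OuterStride<>(ldb));
  Cmap.noalias() += Amap * Bmap;   // all operands column major, as the kernel assumes
}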
abess
abess-master/python/include/Eigen/src/StlSupport/StdVector.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <[email protected]> // Copyright (C) 2009 Hauke Heibel <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STDVECTOR_H #define EIGEN_STDVECTOR_H #include "details.h" /** * This section contains a convenience MACRO which allows an easy specialization of * std::vector such that for data types with alignment issues the correct allocator * is used automatically. */ #define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \ namespace std \ { \ template<> \ class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> > \ : public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \ { \ typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \ public: \ typedef __VA_ARGS__ value_type; \ typedef vector_base::allocator_type allocator_type; \ typedef vector_base::size_type size_type; \ typedef vector_base::iterator iterator; \ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \ template<typename InputIterator> \ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \ vector(const vector& c) : vector_base(c) {} \ explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \ vector(iterator start, iterator end) : vector_base(start, end) {} \ vector& operator=(const vector& x) { \ vector_base::operator=(x); \ return *this; \ } \ }; \ } // Don't specialize if containers are implemented according to C++11 #if !EIGEN_HAS_CXX11_CONTAINERS namespace std { #define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \ public: \ typedef T value_type; \ typedef typename vector_base::allocator_type allocator_type; \ typedef typename vector_base::size_type size_type; \ typedef typename vector_base::iterator iterator; \ typedef typename vector_base::const_iterator const_iterator; \ explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \ template<typename InputIterator> \ vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ : vector_base(first, last, a) {} \ vector(const vector& c) : vector_base(c) {} \ explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \ vector(iterator start, iterator end) : vector_base(start, end) {} \ vector& operator=(const vector& x) { \ vector_base::operator=(x); \ return *this; \ } template<typename T> class vector<T,EIGEN_ALIGNED_ALLOCATOR<T> > : public vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T), Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > { typedef vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T), Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > vector_base; EIGEN_STD_VECTOR_SPECIALIZATION_BODY void resize(size_type new_size) { resize(new_size, T()); } #if defined(_VECTOR_) // workaround MSVC std::vector implementation void resize(size_type new_size, const value_type& x) { if (vector_base::size() < new_size) vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x); else if (new_size < vector_base::size()) vector_base::erase(vector_base::begin() + new_size, vector_base::end()); } void push_back(const value_type& x) { vector_base::push_back(x); } using 
vector_base::insert; iterator insert(const_iterator position, const value_type& x) { return vector_base::insert(position,x); } void insert(const_iterator position, size_type new_size, const value_type& x) { vector_base::insert(position, new_size, x); } #elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&). * However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */ void resize(size_type new_size, const value_type& x) { vector_base::resize(new_size,x); } #elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2) // workaround GCC std::vector implementation void resize(size_type new_size, const value_type& x) { if (new_size < vector_base::size()) vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size); else vector_base::insert(vector_base::end(), new_size - vector_base::size(), x); } #else // either GCC 4.1 or non-GCC // default implementation which should always work. void resize(size_type new_size, const value_type& x) { if (new_size < vector_base::size()) vector_base::erase(vector_base::begin() + new_size, vector_base::end()); else if (new_size > vector_base::size()) vector_base::insert(vector_base::end(), new_size - vector_base::size(), x); } #endif }; } #endif // !EIGEN_HAS_CXX11_CONTAINERS #endif // EIGEN_STDVECTOR_H
5,330
39.386364
130
h
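A short sketch of the two usual ways this header is used with fixed-size vectorizable types; note that the specialization macro must appear before the first use of the specialized vector.

#include <vector>
#include <Eigen/Core>
#include <Eigen/StdVector>

EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Matrix4f)   // option 1: specialize std::vector<Matrix4f>

int main()
{
  std::vector<Eigen::Matrix4f> a(3);                                              // uses the specialization above
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > b(3);  // option 2: explicit allocator
  a[0].setIdentity();
  b[0].setZero();
  return 0;
}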
abess
abess-master/python/include/Eigen/src/StlSupport/details.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <[email protected]> // Copyright (C) 2009 Hauke Heibel <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_STL_DETAILS_H #define EIGEN_STL_DETAILS_H #ifndef EIGEN_ALIGNED_ALLOCATOR #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator #endif namespace Eigen { // This one is needed to prevent reimplementing the whole std::vector. template <class T> class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR<T> { public: typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef T value_type; template<class U> struct rebind { typedef aligned_allocator_indirection<U> other; }; aligned_allocator_indirection() {} aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR<T>() {} aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<T>& ) {} template<class U> aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) {} template<class U> aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<U>& ) {} ~aligned_allocator_indirection() {} }; #if EIGEN_COMP_MSVC // sometimes, MSVC detects, at compile time, that the argument x // in std::vector::resize(size_t s,T x) won't be aligned and generate an error // even if this function is never called. Whence this little wrapper. #define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \ typename Eigen::internal::conditional< \ Eigen::internal::is_arithmetic<T>::value, \ T, \ Eigen::internal::workaround_msvc_stl_support<T> \ >::type namespace internal { template<typename T> struct workaround_msvc_stl_support : public T { inline workaround_msvc_stl_support() : T() {} inline workaround_msvc_stl_support(const T& other) : T(other) {} inline operator T& () { return *static_cast<T*>(this); } inline operator const T& () const { return *static_cast<const T*>(this); } template<typename OtherT> inline T& operator=(const OtherT& other) { T::operator=(other); return *this; } inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other) { T::operator=(other); return *this; } }; } #else #define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T #endif } #endif // EIGEN_STL_DETAILS_H
2,809
32.058824
106
h
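aligned_allocator_indirection and the MSVC workaround above are implementation details; the user-facing piece is Eigen::aligned_allocator, which is also what one passes to standard containers other than std::vector. Sketch:

#include <map>
#include <Eigen/Core>

int main()
{
  // A map whose mapped type is fixed-size vectorizable needs the aligned allocator.
  std::map<int, Eigen::Vector4d, std::less<int>,
           Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4d> > > m;
  m[0] = Eigen::Vector4d::Ones();
  return 0;
}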
abess
abess-master/python/include/Eigen/src/misc/Image.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MISC_IMAGE_H #define EIGEN_MISC_IMAGE_H namespace Eigen { namespace internal { /** \class image_retval_base * */ template<typename DecompositionType> struct traits<image_retval_base<DecompositionType> > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix< typename MatrixType::Scalar, MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose // dimension is the number of rows of the original matrix Dynamic, // we don't know at compile time the dimension of the image (the rank) MatrixType::Options, MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix, MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns. > ReturnType; }; template<typename _DecompositionType> struct image_retval_base : public ReturnByValue<image_retval_base<_DecompositionType> > { typedef _DecompositionType DecompositionType; typedef typename DecompositionType::MatrixType MatrixType; typedef ReturnByValue<image_retval_base> Base; image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix) : m_dec(dec), m_rank(dec.rank()), m_cols(m_rank == 0 ? 1 : m_rank), m_originalMatrix(originalMatrix) {} inline Index rows() const { return m_dec.rows(); } inline Index cols() const { return m_cols; } inline Index rank() const { return m_rank; } inline const DecompositionType& dec() const { return m_dec; } inline const MatrixType& originalMatrix() const { return m_originalMatrix; } template<typename Dest> inline void evalTo(Dest& dst) const { static_cast<const image_retval<DecompositionType>*>(this)->evalTo(dst); } protected: const DecompositionType& m_dec; Index m_rank, m_cols; const MatrixType& m_originalMatrix; }; } // end namespace internal #define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef Eigen::internal::image_retval_base<DecompositionType> Base; \ using Base::dec; \ using Base::originalMatrix; \ using Base::rank; \ using Base::rows; \ using Base::cols; \ image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \ : Base(dec, originalMatrix) {} } // end namespace Eigen #endif // EIGEN_MISC_IMAGE_H
2,913
34.108434
107
h
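image_retval_base backs the decompositions that expose an image() method, most notably FullPivLU, which returns a basis of the column space picked from columns of the original matrix. Small sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(3, 3);
  A << 1, 2, 3,
       2, 4, 6,   // second row is twice the first, so rank(A) == 2
       1, 0, 1;
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  std::cout << "rank: " << lu.rank() << "\n";
  std::cout << "image:\n" << lu.image(A) << "\n";   // 3 x rank matrix
  return 0;
}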
abess
abess-master/python/include/Eigen/src/misc/Kernel.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Benoit Jacob <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_MISC_KERNEL_H #define EIGEN_MISC_KERNEL_H namespace Eigen { namespace internal { /** \class kernel_retval_base * */ template<typename DecompositionType> struct traits<kernel_retval_base<DecompositionType> > { typedef typename DecompositionType::MatrixType MatrixType; typedef Matrix< typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, // the number of rows in the "kernel matrix" // is the number of cols of the original matrix // so that the product "matrix * kernel = zero" makes sense Dynamic, // we don't know at compile-time the dimension of the kernel MatrixType::Options, MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space, // whose dimension is the number of columns of the original matrix > ReturnType; }; template<typename _DecompositionType> struct kernel_retval_base : public ReturnByValue<kernel_retval_base<_DecompositionType> > { typedef _DecompositionType DecompositionType; typedef ReturnByValue<kernel_retval_base> Base; explicit kernel_retval_base(const DecompositionType& dec) : m_dec(dec), m_rank(dec.rank()), m_cols(m_rank==dec.cols() ? 1 : dec.cols() - m_rank) {} inline Index rows() const { return m_dec.cols(); } inline Index cols() const { return m_cols; } inline Index rank() const { return m_rank; } inline const DecompositionType& dec() const { return m_dec; } template<typename Dest> inline void evalTo(Dest& dst) const { static_cast<const kernel_retval<DecompositionType>*>(this)->evalTo(dst); } protected: const DecompositionType& m_dec; Index m_rank, m_cols; }; } // end namespace internal #define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \ typedef typename DecompositionType::MatrixType MatrixType; \ typedef typename MatrixType::Scalar Scalar; \ typedef typename MatrixType::RealScalar RealScalar; \ typedef Eigen::internal::kernel_retval_base<DecompositionType> Base; \ using Base::dec; \ using Base::rank; \ using Base::rows; \ using Base::cols; \ kernel_retval(const DecompositionType& dec) : Base(dec) {} } // end namespace Eigen #endif // EIGEN_MISC_KERNEL_H
2,742
33.2875
103
h
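Likewise, kernel_retval_base backs FullPivLU::kernel(), which returns a basis of the null space. Sketch:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(2, 3);
  A << 1, 2, 3,
       4, 5, 6;                              // rank 2, so the kernel is one-dimensional
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  Eigen::MatrixXd K = lu.kernel();           // 3 x 1
  std::cout << (A * K).norm() << "\n";       // ~0 up to rounding
  return 0;
}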
abess
abess-master/python/include/Eigen/src/misc/RealSvd2x2.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <[email protected]>
// Copyright (C) 2013-2016 Gael Guennebaud <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_REALSVD2X2_H
#define EIGEN_REALSVD2X2_H

namespace Eigen {

namespace internal {

template<typename MatrixType, typename RealScalar, typename Index>
void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
                         JacobiRotation<RealScalar> *j_left,
                         JacobiRotation<RealScalar> *j_right)
{
  using std::sqrt;
  using std::abs;
  Matrix<RealScalar,2,2> m;
  m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
       numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
  JacobiRotation<RealScalar> rot1;
  RealScalar t = m.coeff(0,0) + m.coeff(1,1);
  RealScalar d = m.coeff(1,0) - m.coeff(0,1);

  if(abs(d) < (std::numeric_limits<RealScalar>::min)())
  {
    rot1.s() = RealScalar(0);
    rot1.c() = RealScalar(1);
  }
  else
  {
    // If d!=0, then t/d cannot overflow because the magnitude of the
    // entries forming d are not too small compared to the ones forming t.
    RealScalar u = t / d;
    RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u));
    rot1.s() = RealScalar(1) / tmp;
    rot1.c() = u / tmp;
  }
  m.applyOnTheLeft(0,1,rot1);
  j_right->makeJacobi(m,0,1);
  *j_left = rot1 * j_right->transpose();
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_REALSVD2X2_H
1,748
30.232143
74
h
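real_2x2_jacobi_svd is an internal building block: JacobiSVD applies it to 2x2 sub-problems during its sweeps. The public-facing analogue for a single real 2x2 matrix is simply:

#include <Eigen/SVD>
#include <iostream>

int main()
{
  Eigen::Matrix2d m;
  m << 3, 1,
       1, 2;
  Eigen::JacobiSVD<Eigen::Matrix2d> svd(m, Eigen::ComputeFullU | Eigen::ComputeFullV);
  std::cout << "singular values: " << svd.singularValues().transpose() << "\n";
  return 0;
}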
abess
abess-master/python/include/Eigen/src/plugins/MatrixCwiseUnaryOps.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <[email protected]> // Copyright (C) 2006-2008 Benoit Jacob <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // This file is included into the body of the base classes supporting matrix specific coefficient-wise functions. // This include MatrixBase and SparseMatrixBase. typedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> CwiseAbsReturnType; typedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> CwiseAbs2ReturnType; typedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> CwiseSqrtReturnType; typedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> CwiseSignReturnType; typedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> CwiseInverseReturnType; /// \returns an expression of the coefficient-wise absolute value of \c *this /// /// Example: \include MatrixBase_cwiseAbs.cpp /// Output: \verbinclude MatrixBase_cwiseAbs.out /// EIGEN_DOC_UNARY_ADDONS(cwiseAbs,absolute value) /// /// \sa cwiseAbs2() /// EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseAbsReturnType cwiseAbs() const { return CwiseAbsReturnType(derived()); } /// \returns an expression of the coefficient-wise squared absolute value of \c *this /// /// Example: \include MatrixBase_cwiseAbs2.cpp /// Output: \verbinclude MatrixBase_cwiseAbs2.out /// EIGEN_DOC_UNARY_ADDONS(cwiseAbs2,squared absolute value) /// /// \sa cwiseAbs() /// EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseAbs2ReturnType cwiseAbs2() const { return CwiseAbs2ReturnType(derived()); } /// \returns an expression of the coefficient-wise square root of *this. /// /// Example: \include MatrixBase_cwiseSqrt.cpp /// Output: \verbinclude MatrixBase_cwiseSqrt.out /// EIGEN_DOC_UNARY_ADDONS(cwiseSqrt,square-root) /// /// \sa cwisePow(), cwiseSquare() /// EIGEN_DEVICE_FUNC inline const CwiseSqrtReturnType cwiseSqrt() const { return CwiseSqrtReturnType(derived()); } /// \returns an expression of the coefficient-wise signum of *this. /// /// Example: \include MatrixBase_cwiseSign.cpp /// Output: \verbinclude MatrixBase_cwiseSign.out /// EIGEN_DOC_UNARY_ADDONS(cwiseSign,sign function) /// EIGEN_DEVICE_FUNC inline const CwiseSignReturnType cwiseSign() const { return CwiseSignReturnType(derived()); } /// \returns an expression of the coefficient-wise inverse of *this. /// /// Example: \include MatrixBase_cwiseInverse.cpp /// Output: \verbinclude MatrixBase_cwiseInverse.out /// EIGEN_DOC_UNARY_ADDONS(cwiseInverse,inverse) /// /// \sa cwiseProduct() /// EIGEN_DEVICE_FUNC inline const CwiseInverseReturnType cwiseInverse() const { return CwiseInverseReturnType(derived()); }
2,937
33.162791
113
h
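A quick sketch of the coefficient-wise helpers declared above, with the expected outputs noted in comments:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d v(1.0, -4.0, 9.0);
  std::cout << v.cwiseAbs().transpose()     << "\n"    // 1 4 9
            << v.cwiseAbs2().transpose()    << "\n"    // 1 16 81
            << v.cwiseSign().transpose()    << "\n"    // 1 -1 1
            << v.cwiseInverse().transpose() << "\n";   // 1 -0.25 0.111...
  std::cout << v.cwiseAbs().cwiseSqrt().transpose() << "\n";   // 1 2 3
  return 0;
}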
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H #define EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H namespace Eigen { /** \class TensorBroadcasting * \ingroup CXX11_Tensor_Module * * \brief Tensor broadcasting class. * * */ namespace internal { template<typename Broadcast, typename XprType> struct traits<TensorBroadcastingOp<Broadcast, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; }; template<typename Broadcast, typename XprType> struct eval<TensorBroadcastingOp<Broadcast, XprType>, Eigen::Dense> { typedef const TensorBroadcastingOp<Broadcast, XprType>& type; }; template<typename Broadcast, typename XprType> struct nested<TensorBroadcastingOp<Broadcast, XprType>, 1, typename eval<TensorBroadcastingOp<Broadcast, XprType> >::type> { typedef TensorBroadcastingOp<Broadcast, XprType> type; }; template <typename Dims> struct is_input_scalar { static const bool value = false; }; template <> struct is_input_scalar<Sizes<> > { static const bool value = true; }; #ifndef EIGEN_EMULATE_CXX11_META_H template <typename std::size_t... Indices> struct is_input_scalar<Sizes<Indices...> > { static const bool value = (Sizes<Indices...>::total_size == 1); }; #endif } // end namespace internal template<typename Broadcast, typename XprType> class TensorBroadcastingOp : public TensorBase<TensorBroadcastingOp<Broadcast, XprType>, ReadOnlyAccessors> { public: typedef typename Eigen::internal::traits<TensorBroadcastingOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorBroadcastingOp>::type Nested; typedef typename Eigen::internal::traits<TensorBroadcastingOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorBroadcastingOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBroadcastingOp(const XprType& expr, const Broadcast& broadcast) : m_xpr(expr), m_broadcast(broadcast) {} EIGEN_DEVICE_FUNC const Broadcast& broadcast() const { return m_broadcast; } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } protected: typename XprType::Nested m_xpr; const Broadcast m_broadcast; }; // Eval as rvalue template<typename Broadcast, typename ArgType, typename Device> struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device> { typedef TensorBroadcastingOp<Broadcast, ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar Scalar; typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename 
PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = true, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = TensorEvaluator<ArgType, Device>::Layout, RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_broadcast(op.broadcast()),m_impl(op.expression(), device) { // The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar // and store the result in a scalar. Instead one should reshape the scalar into a a N-D // tensor with N >= 1 of 1 element first and then broadcast. EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); const InputDimensions& input_dims = m_impl.dimensions(); const Broadcast& broadcast = op.broadcast(); for (int i = 0; i < NumDims; ++i) { eigen_assert(input_dims[i] > 0); m_dimensions[i] = input_dims[i] * broadcast[i]; } if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { m_inputStrides[0] = 1; m_outputStrides[0] = 1; for (int i = 1; i < NumDims; ++i) { m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; } } else { m_inputStrides[NumDims-1] = 1; m_outputStrides[NumDims-1] = 1; for (int i = NumDims-2; i >= 0; --i) { m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; } } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { m_impl.evalSubExprsIfNeeded(NULL); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const { if (internal::is_input_scalar<typename internal::remove_all<InputDimensions>::type>::value) { return m_impl.coeff(0); } if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { return coeffColMajor(index); } else { return coeffRowMajor(index); } } // TODO: attempt to speed this up. 
The integer divisions and modulo are slow EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const { Index inputIndex = 0; for (int i = NumDims - 1; i > 0; --i) { const Index idx = index / m_outputStrides[i]; if (internal::index_statically_eq<Broadcast>(i, 1)) { eigen_assert(idx < m_impl.dimensions()[i]); inputIndex += idx * m_inputStrides[i]; } else { if (internal::index_statically_eq<InputDimensions>(i, 1)) { eigen_assert(idx % m_impl.dimensions()[i] == 0); } else { inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; } } index -= idx * m_outputStrides[i]; } if (internal::index_statically_eq<Broadcast>(0, 1)) { eigen_assert(index < m_impl.dimensions()[0]); inputIndex += index; } else { if (internal::index_statically_eq<InputDimensions>(0, 1)) { eigen_assert(index % m_impl.dimensions()[0] == 0); } else { inputIndex += (index % m_impl.dimensions()[0]); } } return m_impl.coeff(inputIndex); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const { Index inputIndex = 0; for (int i = 0; i < NumDims - 1; ++i) { const Index idx = index / m_outputStrides[i]; if (internal::index_statically_eq<Broadcast>(i, 1)) { eigen_assert(idx < m_impl.dimensions()[i]); inputIndex += idx * m_inputStrides[i]; } else { if (internal::index_statically_eq<InputDimensions>(i, 1)) { eigen_assert(idx % m_impl.dimensions()[i] == 0); } else { inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; } } index -= idx * m_outputStrides[i]; } if (internal::index_statically_eq<Broadcast>(NumDims-1, 1)) { eigen_assert(index < m_impl.dimensions()[NumDims-1]); inputIndex += index; } else { if (internal::index_statically_eq<InputDimensions>(NumDims-1, 1)) { eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0); } else { inputIndex += (index % m_impl.dimensions()[NumDims-1]); } } return m_impl.coeff(inputIndex); } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const { if (internal::is_input_scalar<typename internal::remove_all<InputDimensions>::type>::value) { return internal::pset1<PacketReturnType>(m_impl.coeff(0)); } if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { return packetColMajor<LoadMode>(index); } else { return packetRowMajor<LoadMode>(index); } } // Ignore the LoadMode and always use unaligned loads since we can't guarantee // the alignment at compile time. 
template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); const Index originalIndex = index; Index inputIndex = 0; for (int i = NumDims - 1; i > 0; --i) { const Index idx = index / m_outputStrides[i]; if (internal::index_statically_eq<Broadcast>(i, 1)) { eigen_assert(idx < m_impl.dimensions()[i]); inputIndex += idx * m_inputStrides[i]; } else { if (internal::index_statically_eq<InputDimensions>(i, 1)) { eigen_assert(idx % m_impl.dimensions()[i] == 0); } else { inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; } } index -= idx * m_outputStrides[i]; } Index innermostLoc; if (internal::index_statically_eq<Broadcast>(0, 1)) { eigen_assert(index < m_impl.dimensions()[0]); innermostLoc = index; } else { if (internal::index_statically_eq<InputDimensions>(0, 1)) { eigen_assert(index % m_impl.dimensions()[0] == 0); innermostLoc = 0; } else { innermostLoc = index % m_impl.dimensions()[0]; } } inputIndex += innermostLoc; // Todo: this could be extended to the second dimension if we're not // broadcasting alongside the first dimension, and so on. if (innermostLoc + PacketSize <= m_impl.dimensions()[0]) { return m_impl.template packet<Unaligned>(inputIndex); } else { EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; values[0] = m_impl.coeff(inputIndex); for (int i = 1; i < PacketSize; ++i) { values[i] = coeffColMajor(originalIndex+i); } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); const Index originalIndex = index; Index inputIndex = 0; for (int i = 0; i < NumDims - 1; ++i) { const Index idx = index / m_outputStrides[i]; if (internal::index_statically_eq<Broadcast>(i, 1)) { eigen_assert(idx < m_impl.dimensions()[i]); inputIndex += idx * m_inputStrides[i]; } else { if (internal::index_statically_eq<InputDimensions>(i, 1)) { eigen_assert(idx % m_impl.dimensions()[i] == 0); } else { inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; } } index -= idx * m_outputStrides[i]; } Index innermostLoc; if (internal::index_statically_eq<Broadcast>(NumDims-1, 1)) { eigen_assert(index < m_impl.dimensions()[NumDims-1]); innermostLoc = index; } else { if (internal::index_statically_eq<InputDimensions>(NumDims-1, 1)) { eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0); innermostLoc = 0; } else { innermostLoc = index % m_impl.dimensions()[NumDims-1]; } } inputIndex += innermostLoc; // Todo: this could be extended to the second dimension if we're not // broadcasting alongside the first dimension, and so on. 
if (innermostLoc + PacketSize <= m_impl.dimensions()[NumDims-1]) { return m_impl.template packet<Unaligned>(inputIndex); } else { EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; values[0] = m_impl.coeff(inputIndex); for (int i = 1; i < PacketSize; ++i) { values[i] = coeffRowMajor(originalIndex+i); } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { double compute_cost = TensorOpCost::AddCost<Index>(); if (NumDims > 0) { for (int i = NumDims - 1; i > 0; --i) { compute_cost += TensorOpCost::DivCost<Index>(); if (internal::index_statically_eq<Broadcast>(i, 1)) { compute_cost += TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>(); } else { if (!internal::index_statically_eq<InputDimensions>(i, 1)) { compute_cost += TensorOpCost::MulCost<Index>() + TensorOpCost::ModCost<Index>() + TensorOpCost::AddCost<Index>(); } } compute_cost += TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>(); } } return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); } EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; } Broadcast functor() const { return m_broadcast; } protected: const Broadcast m_broadcast; Dimensions m_dimensions; array<Index, NumDims> m_outputStrides; array<Index, NumDims> m_inputStrides; TensorEvaluator<ArgType, Device> m_impl; }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H
14,286
35.35369
122
h
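Broadcasting is reached through TensorBase::broadcast(), which tiles the tensor along each dimension by the given factor. Minimal sketch (sizes chosen arbitrarily):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main()
{
  Eigen::Tensor<float, 2> t(2, 3);
  t.setConstant(1.0f);
  Eigen::array<Eigen::Index, 2> bcast = {{2, 2}};    // repeat twice along each dimension
  Eigen::Tensor<float, 2> b = t.broadcast(bcast);    // result is 4 x 6
  std::cout << b.dimension(0) << " x " << b.dimension(1) << "\n";
  return 0;
}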
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H #define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H namespace Eigen { /** \class TensorKChippingReshaping * \ingroup CXX11_Tensor_Module * * \brief A chip is a thin slice, corresponding to a column or a row in a 2-d tensor. * * */ namespace internal { template<DenseIndex DimId, typename XprType> struct traits<TensorChippingOp<DimId, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions - 1; static const int Layout = XprTraits::Layout; }; template<DenseIndex DimId, typename XprType> struct eval<TensorChippingOp<DimId, XprType>, Eigen::Dense> { typedef const TensorChippingOp<DimId, XprType>& type; }; template<DenseIndex DimId, typename XprType> struct nested<TensorChippingOp<DimId, XprType>, 1, typename eval<TensorChippingOp<DimId, XprType> >::type> { typedef TensorChippingOp<DimId, XprType> type; }; template <DenseIndex DimId> struct DimensionId { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) { eigen_assert(dim == DimId); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const { return DimId; } }; template <> struct DimensionId<Dynamic> { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) : actual_dim(dim) { eigen_assert(dim >= 0); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const { return actual_dim; } private: const DenseIndex actual_dim; }; } // end namespace internal template<DenseIndex DimId, typename XprType> class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> > { public: typedef typename Eigen::internal::traits<TensorChippingOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorChippingOp>::type Nested; typedef typename Eigen::internal::traits<TensorChippingOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorChippingOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim) : m_xpr(expr), m_offset(offset), m_dim(dim) { } EIGEN_DEVICE_FUNC const Index offset() const { return m_offset; } EIGEN_DEVICE_FUNC const Index dim() const { return m_dim.actualDim(); } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp& operator = (const TensorChippingOp& other) { typedef TensorAssignOp<TensorChippingOp, const TensorChippingOp> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp& operator = (const OtherDerived& other) { typedef TensorAssignOp<TensorChippingOp, const OtherDerived> Assign; Assign 
assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } protected: typename XprType::Nested m_xpr; const Index m_offset; const internal::DimensionId<DimId> m_dim; }; // Eval as rvalue template<DenseIndex DimId, typename ArgType, typename Device> struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> { typedef TensorChippingOp<DimId, ArgType> XprType; static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; static const int NumDims = NumInputDims-1; typedef typename XprType::Index Index; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { // Alignment can't be guaranteed at compile time since it depends on the // slice offsets. IsAligned = false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, // to be implemented RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device) { EIGEN_STATIC_ASSERT((NumInputDims >= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); eigen_assert(NumInputDims > m_dim.actualDim()); const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); eigen_assert(op.offset() < input_dims[m_dim.actualDim()]); int j = 0; for (int i = 0; i < NumInputDims; ++i) { if (i != m_dim.actualDim()) { m_dimensions[j] = input_dims[i]; ++j; } } m_stride = 1; m_inputStride = 1; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = 0; i < m_dim.actualDim(); ++i) { m_stride *= input_dims[i]; m_inputStride *= input_dims[i]; } } else { for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) { m_stride *= input_dims[i]; m_inputStride *= input_dims[i]; } } m_inputStride *= input_dims[m_dim.actualDim()]; m_inputOffset = m_stride * op.offset(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { m_impl.evalSubExprsIfNeeded(NULL); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_impl.coeff(srcCoeff(index)); } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) || (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) { // m_stride is equal to 1, so let's avoid the integer division. 
eigen_assert(m_stride == 1); Index inputIndex = index * m_inputStride + m_inputOffset; EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; for (int i = 0; i < PacketSize; ++i) { values[i] = m_impl.coeff(inputIndex); inputIndex += m_inputStride; } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) || (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) { // m_stride is aways greater than index, so let's avoid the integer division. eigen_assert(m_stride > index); return m_impl.template packet<LoadMode>(index + m_inputOffset); } else { const Index idx = index / m_stride; const Index rem = index - idx * m_stride; if (rem + PacketSize <= m_stride) { Index inputIndex = idx * m_inputStride + m_inputOffset + rem; return m_impl.template packet<LoadMode>(inputIndex); } else { // Cross the stride boundary. Fallback to slow path. EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; for (int i = 0; i < PacketSize; ++i) { values[i] = coeff(index); ++index; } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { double cost = 0; if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) || (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims - 1)) { cost += TensorOpCost::MulCost<Index>() + TensorOpCost::AddCost<Index>(); } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims - 1) || (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) { cost += TensorOpCost::AddCost<Index>(); } else { cost += 3 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>() + 3 * TensorOpCost::AddCost<Index>(); } return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cost, vectorized, PacketSize); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { CoeffReturnType* result = const_cast<CoeffReturnType*>(m_impl.data()); if (((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumDims) || (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) && result) { return result + m_inputOffset; } else { return NULL; } } protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const { Index inputIndex; if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) || (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) { // m_stride is equal to 1, so let's avoid the integer division. eigen_assert(m_stride == 1); inputIndex = index * m_inputStride + m_inputOffset; } else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims-1) || (static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) { // m_stride is aways greater than index, so let's avoid the integer division. 
eigen_assert(m_stride > index); inputIndex = index + m_inputOffset; } else { const Index idx = index / m_stride; inputIndex = idx * m_inputStride + m_inputOffset; index -= idx * m_stride; inputIndex += index; } return inputIndex; } Dimensions m_dimensions; Index m_stride; Index m_inputOffset; Index m_inputStride; TensorEvaluator<ArgType, Device> m_impl; const internal::DimensionId<DimId> m_dim; const Device& m_device; }; // Eval as lvalue template<DenseIndex DimId, typename ArgType, typename Device> struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device> : public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> { typedef TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> Base; typedef TensorChippingOp<DimId, ArgType> XprType; static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; static const int NumDims = NumInputDims-1; typedef typename XprType::Index Index; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) { return this->m_impl.coeffRef(this->srcCoeff(index)); } template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == 0) || (static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == NumInputDims-1)) { // m_stride is equal to 1, so let's avoid the integer division. eigen_assert(this->m_stride == 1); EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; internal::pstore<CoeffReturnType, PacketReturnType>(values, x); Index inputIndex = index * this->m_inputStride + this->m_inputOffset; for (int i = 0; i < PacketSize; ++i) { this->m_impl.coeffRef(inputIndex) = values[i]; inputIndex += this->m_inputStride; } } else if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) || (static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == 0)) { // m_stride is aways greater than index, so let's avoid the integer division. eigen_assert(this->m_stride > index); this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x); } else { const Index idx = index / this->m_stride; const Index rem = index - idx * this->m_stride; if (rem + PacketSize <= this->m_stride) { const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem; this->m_impl.template writePacket<StoreMode>(inputIndex, x); } else { // Cross stride boundary. Fallback to slow path. 
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; internal::pstore<CoeffReturnType, PacketReturnType>(values, x); for (int i = 0; i < PacketSize; ++i) { this->coeffRef(index) = values[i]; ++index; } } } } }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H
14,755
37.327273
125
h
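Chipping is reached through TensorBase::chip(offset, dim); the resulting expression is readable and, on an lvalue tensor, assignable. Minimal sketch:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main()
{
  Eigen::Tensor<float, 3> t(4, 3, 2);
  t.setZero();
  Eigen::Tensor<float, 2> slice = t.chip(1, 2);   // the 4x3 slab at offset 1 along dimension 2
  Eigen::Tensor<float, 2> ones(4, 3);
  ones.setConstant(7.0f);
  t.chip(0, 2) = ones;                            // chipping expressions can also be written to
  std::cout << slice.dimension(0) << " x " << slice.dimension(1) << "\n";
  return 0;
}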
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H

namespace Eigen {
namespace internal {

enum {
  ShardByRow = 0,
  ShardByCol = 1
};

// Default Blocking Strategy
template <typename LhsMapper, typename RhsMapper, typename Index, int ShardingType=ShardByCol>
class TensorContractionBlocking {
 public:

  typedef typename LhsMapper::Scalar LhsScalar;
  typedef typename RhsMapper::Scalar RhsScalar;

  EIGEN_DEVICE_FUNC TensorContractionBlocking(Index k, Index m, Index n, Index num_threads = 1) :
      kc_(k), mc_(m), nc_(n)
  {
    if (ShardingType == ShardByCol) {
      computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, mc_, nc_, num_threads);
    }
    else {
      computeProductBlockingSizes<LhsScalar, RhsScalar, 1>(kc_, nc_, mc_, num_threads);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index kc() const { return kc_; }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index mc() const { return mc_; }
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index nc() const { return nc_; }

 private:
  Index kc_;
  Index mc_;
  Index nc_;
};

} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H
1,594
26.982456
97
h
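TensorContractionBlocking is instantiated internally by the contraction evaluators to pick cache-blocking sizes; the user-visible entry point it serves is TensorBase::contract(). Sketch of such a contraction over one index pair:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main()
{
  Eigen::Tensor<float, 2> a(30, 40), b(40, 50);
  a.setRandom();
  b.setRandom();
  Eigen::array<Eigen::IndexPair<int>, 1> dims = {{ Eigen::IndexPair<int>(1, 0) }};
  Eigen::Tensor<float, 2> c = a.contract(b, dims);   // matrix-product-like result, 30 x 50
  std::cout << c.dimension(0) << " x " << c.dimension(1) << "\n";
  return 0;
}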
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Rasmus Munk Larsen <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H #define EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H namespace Eigen { /** \class TensorEvaluator * \ingroup CXX11_Tensor_Module * * \brief A cost model used to limit the number of threads used for evaluating * tensor expression. * */ // Class storing the cost of evaluating a tensor expression in terms of the // estimated number of operand bytes loads, bytes stored, and compute cycles. class TensorOpCost { public: // TODO(rmlarsen): Fix the scalar op costs in Eigen proper. Even a simple // model based on minimal reciprocal throughput numbers from Intel or // Agner Fog's tables would be better than what is there now. template <typename ArgType> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int MulCost() { return internal::functor_traits< internal::scalar_product_op<ArgType, ArgType> >::Cost; } template <typename ArgType> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int AddCost() { return internal::functor_traits<internal::scalar_sum_op<ArgType> >::Cost; } template <typename ArgType> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int DivCost() { return internal::functor_traits< internal::scalar_quotient_op<ArgType, ArgType> >::Cost; } template <typename ArgType> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int ModCost() { return internal::functor_traits<internal::scalar_mod_op<ArgType> >::Cost; } template <typename SrcType, typename TargetType> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int CastCost() { return internal::functor_traits< internal::scalar_cast_op<SrcType, TargetType> >::Cost; } EIGEN_DEVICE_FUNC TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {} EIGEN_DEVICE_FUNC TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles) : bytes_loaded_(bytes_loaded), bytes_stored_(bytes_stored), compute_cycles_(compute_cycles) {} EIGEN_DEVICE_FUNC TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles, bool vectorized, double packet_size) : bytes_loaded_(bytes_loaded), bytes_stored_(bytes_stored), compute_cycles_(vectorized ? compute_cycles / packet_size : compute_cycles) { eigen_assert(bytes_loaded >= 0 && (numext::isfinite)(bytes_loaded)); eigen_assert(bytes_stored >= 0 && (numext::isfinite)(bytes_stored)); eigen_assert(compute_cycles >= 0 && (numext::isfinite)(compute_cycles)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_loaded() const { return bytes_loaded_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_stored() const { return bytes_stored_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double compute_cycles() const { return compute_cycles_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double total_cost( double load_cost, double store_cost, double compute_cost) const { return load_cost * bytes_loaded_ + store_cost * bytes_stored_ + compute_cost * compute_cycles_; } // Drop memory access component. Intended for cases when memory accesses are // sequential or are completely masked by computations. EIGEN_DEVICE_FUNC void dropMemoryCost() { bytes_loaded_ = 0; bytes_stored_ = 0; } // TODO(rmlarsen): Define min in terms of total cost, not elementwise. 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMin( const TensorOpCost& rhs) const { double bytes_loaded = numext::mini(bytes_loaded_, rhs.bytes_loaded()); double bytes_stored = numext::mini(bytes_stored_, rhs.bytes_stored()); double compute_cycles = numext::mini(compute_cycles_, rhs.compute_cycles()); return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles); } // TODO(rmlarsen): Define max in terms of total cost, not elementwise. EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMax( const TensorOpCost& rhs) const { double bytes_loaded = numext::maxi(bytes_loaded_, rhs.bytes_loaded()); double bytes_stored = numext::maxi(bytes_stored_, rhs.bytes_stored()); double compute_cycles = numext::maxi(compute_cycles_, rhs.compute_cycles()); return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator+=( const TensorOpCost& rhs) { bytes_loaded_ += rhs.bytes_loaded(); bytes_stored_ += rhs.bytes_stored(); compute_cycles_ += rhs.compute_cycles(); return *this; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator*=(double rhs) { bytes_loaded_ *= rhs; bytes_stored_ *= rhs; compute_cycles_ *= rhs; return *this; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator+( TensorOpCost lhs, const TensorOpCost& rhs) { lhs += rhs; return lhs; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*( TensorOpCost lhs, double rhs) { lhs *= rhs; return lhs; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*( double lhs, TensorOpCost rhs) { rhs *= lhs; return rhs; } friend std::ostream& operator<<(std::ostream& os, const TensorOpCost& tc) { return os << "[bytes_loaded = " << tc.bytes_loaded() << ", bytes_stored = " << tc.bytes_stored() << ", compute_cycles = " << tc.compute_cycles() << "]"; } private: double bytes_loaded_; double bytes_stored_; double compute_cycles_; }; // TODO(rmlarsen): Implement a policy that chooses an "optimal" number of theads // in [1:max_threads] instead of just switching multi-threading off for small // work units. template <typename Device> class TensorCostModel { public: // Scaling from Eigen compute cost to device cycles. static const int kDeviceCyclesPerComputeCycle = 1; // Costs in device cycles. static const int kStartupCycles = 100000; static const int kPerThreadCycles = 100000; static const int kTaskSize = 40000; // Returns the number of threads in [1:max_threads] to use for // evaluating an expression with the given output size and cost per // coefficient. static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int numThreads( double output_size, const TensorOpCost& cost_per_coeff, int max_threads) { double cost = totalCost(output_size, cost_per_coeff); int threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9; return numext::mini(max_threads, numext::maxi(1, threads)); } // taskSize assesses parallel task size. // Value of 1.0 means ideal parallel task size. Values < 1.0 mean that task // granularity needs to be increased to mitigate parallelization overheads. static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double taskSize( double output_size, const TensorOpCost& cost_per_coeff) { return totalCost(output_size, cost_per_coeff) / kTaskSize; } private: static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double totalCost( double output_size, const TensorOpCost& cost_per_coeff) { // Cost of memory fetches from L2 cache. 64 is typical cache line size. // 11 is L2 cache latency on Haswell. // We don't know whether data is in L1, L2 or L3. 
But we are most interested // in single-threaded computational time around 100us-10ms (smaller time // is too small for parallelization, larger time is not interesting // either because we are probably using all available threads already). // And for the target time range, L2 seems to be what matters. Data set // fitting into L1 is too small to take noticeable time. Data set fitting // only into L3 presumably will take more than 10ms to load and process. const double kLoadCycles = 1.0 / 64 * 11; const double kStoreCycles = 1.0 / 64 * 11; // Scaling from Eigen compute cost to device cycles. return output_size * cost_per_coeff.total_cost(kLoadCycles, kStoreCycles, kDeviceCyclesPerComputeCycle); } }; } // namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H
8,443
38.643192
80
h
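A quick way to see the cost model from the record above in action. The sketch below is illustrative and not part of the dataset; it assumes the unsupported CXX11 Tensor headers are on the include path and that EIGEN_USE_THREADS is defined so that ThreadPoolDevice exists.

#define EIGEN_USE_THREADS
#include <iostream>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // Per-coefficient cost: 8 bytes loaded, 4 bytes stored, 1 compute cycle.
  Eigen::TensorOpCost cost(/*bytes_loaded=*/8, /*bytes_stored=*/4,
                           /*compute_cycles=*/1);

  // numThreads() converts the total cost (load/store bytes scaled by the L2
  // constants plus compute cycles) into a thread count, capped at max_threads
  // and never below 1.
  const double output_size = 1e6;  // number of output coefficients
  const int max_threads = 16;
  const int threads = Eigen::TensorCostModel<Eigen::ThreadPoolDevice>::numThreads(
      output_size, cost, max_threads);
  std::cout << "suggested threads: " << threads << std::endl;
  return 0;
}

For very small outputs the startup term dominates and the model falls back to a single thread, which is exactly the "switch multi-threading off for small work units" policy mentioned in the TODO above.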
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H #define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H namespace Eigen { /** \class TensorDevice * \ingroup CXX11_Tensor_Module * * \brief Pseudo expression providing an operator = that will evaluate its argument * on the specified computing 'device' (GPU, thread pool, ...) * * Example: * C.device(EIGEN_GPU) = A + B; * * Todo: operator *= and /=. */ template <typename ExpressionType, typename DeviceType> class TensorDevice { public: TensorDevice(const DeviceType& device, ExpressionType& expression) : m_device(device), m_expression(expression) {} template<typename OtherDerived> EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) { typedef TensorAssignOp<ExpressionType, const OtherDerived> Assign; Assign assign(m_expression, other); internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device); return *this; } template<typename OtherDerived> EIGEN_STRONG_INLINE TensorDevice& operator+=(const OtherDerived& other) { typedef typename OtherDerived::Scalar Scalar; typedef TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const ExpressionType, const OtherDerived> Sum; Sum sum(m_expression, other); typedef TensorAssignOp<ExpressionType, const Sum> Assign; Assign assign(m_expression, sum); internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device); return *this; } template<typename OtherDerived> EIGEN_STRONG_INLINE TensorDevice& operator-=(const OtherDerived& other) { typedef typename OtherDerived::Scalar Scalar; typedef TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const ExpressionType, const OtherDerived> Difference; Difference difference(m_expression, other); typedef TensorAssignOp<ExpressionType, const Difference> Assign; Assign assign(m_expression, difference); internal::TensorExecutor<const Assign, DeviceType>::run(assign, m_device); return *this; } protected: const DeviceType& m_device; ExpressionType& m_expression; }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H
2,570
36.26087
127
h
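A minimal usage sketch for the TensorDevice pseudo expression in the record above (illustrative, not part of the dataset; assumes EIGEN_USE_THREADS and the unsupported Tensor headers; the pool size of 4 is an arbitrary choice).

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 2> A(256, 256), B(256, 256), C(256, 256);
  A.setRandom();
  B.setRandom();

  // Route evaluation through a thread pool instead of the default device.
  // operator= wraps the right-hand side in a TensorAssignOp and hands it to
  // TensorExecutor for the chosen device.
  Eigen::ThreadPool pool(4);
  Eigen::ThreadPoolDevice dev(&pool, /*num_cores=*/4);
  C.device(dev) = A + B;
  C.device(dev) += A;  // operator+= builds the sum expression shown above
  return 0;
}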
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H) #define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H namespace Eigen { static const int kCudaScratchSize = 1024; // This defines an interface that GPUDevice can take to use // CUDA streams underneath. class StreamInterface { public: virtual ~StreamInterface() {} virtual const cudaStream_t& stream() const = 0; virtual const cudaDeviceProp& deviceProperties() const = 0; // Allocate memory on the actual device where the computation will run virtual void* allocate(size_t num_bytes) const = 0; virtual void deallocate(void* buffer) const = 0; // Return a scratchpad buffer of size 1k virtual void* scratchpad() const = 0; // Return a semaphore. The semaphore is initially initialized to 0, and // each kernel using it is responsible for resetting to 0 upon completion // to maintain the invariant that the semaphore is always equal to 0 upon // each kernel start. virtual unsigned int* semaphore() const = 0; }; static cudaDeviceProp* m_deviceProperties; static bool m_devicePropInitialized = false; static void initializeDeviceProp() { if (!m_devicePropInitialized) { // Attempts to ensure proper behavior in the case of multiple threads // calling this function simultaneously. This would be trivial to // implement if we could use std::mutex, but unfortunately mutex don't // compile with nvcc, so we resort to atomics and thread fences instead. // Note that if the caller uses a compiler that doesn't support c++11 we // can't ensure that the initialization is thread safe. #if __cplusplus >= 201103L static std::atomic<bool> first(true); if (first.exchange(false)) { #else static bool first = true; if (first) { first = false; #endif // We're the first thread to reach this point. int num_devices; cudaError_t status = cudaGetDeviceCount(&num_devices); if (status != cudaSuccess) { std::cerr << "Failed to get the number of CUDA devices: " << cudaGetErrorString(status) << std::endl; assert(status == cudaSuccess); } m_deviceProperties = new cudaDeviceProp[num_devices]; for (int i = 0; i < num_devices; ++i) { status = cudaGetDeviceProperties(&m_deviceProperties[i], i); if (status != cudaSuccess) { std::cerr << "Failed to initialize CUDA device #" << i << ": " << cudaGetErrorString(status) << std::endl; assert(status == cudaSuccess); } } #if __cplusplus >= 201103L std::atomic_thread_fence(std::memory_order_release); #endif m_devicePropInitialized = true; } else { // Wait for the other thread to inititialize the properties. while (!m_devicePropInitialized) { #if __cplusplus >= 201103L std::atomic_thread_fence(std::memory_order_acquire); #endif sleep(1); } } } } static const cudaStream_t default_stream = cudaStreamDefault; class CudaStreamDevice : public StreamInterface { public: // Use the default stream on the current device CudaStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) { cudaGetDevice(&device_); initializeDeviceProp(); } // Use the default stream on the specified device CudaStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) { initializeDeviceProp(); } // Use the specified stream. 
Note that it's the // caller responsibility to ensure that the stream can run on // the specified device. If no device is specified the code // assumes that the stream is associated to the current gpu device. CudaStreamDevice(const cudaStream_t* stream, int device = -1) : stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) { if (device < 0) { cudaGetDevice(&device_); } else { int num_devices; cudaError_t err = cudaGetDeviceCount(&num_devices); EIGEN_UNUSED_VARIABLE(err) assert(err == cudaSuccess); assert(device < num_devices); device_ = device; } initializeDeviceProp(); } virtual ~CudaStreamDevice() { if (scratch_) { deallocate(scratch_); } } const cudaStream_t& stream() const { return *stream_; } const cudaDeviceProp& deviceProperties() const { return m_deviceProperties[device_]; } virtual void* allocate(size_t num_bytes) const { cudaError_t err = cudaSetDevice(device_); EIGEN_UNUSED_VARIABLE(err) assert(err == cudaSuccess); void* result; err = cudaMalloc(&result, num_bytes); assert(err == cudaSuccess); assert(result != NULL); return result; } virtual void deallocate(void* buffer) const { cudaError_t err = cudaSetDevice(device_); EIGEN_UNUSED_VARIABLE(err) assert(err == cudaSuccess); assert(buffer != NULL); err = cudaFree(buffer); assert(err == cudaSuccess); } virtual void* scratchpad() const { if (scratch_ == NULL) { scratch_ = allocate(kCudaScratchSize + sizeof(unsigned int)); } return scratch_; } virtual unsigned int* semaphore() const { if (semaphore_ == NULL) { char* scratch = static_cast<char*>(scratchpad()) + kCudaScratchSize; semaphore_ = reinterpret_cast<unsigned int*>(scratch); cudaError_t err = cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_); EIGEN_UNUSED_VARIABLE(err) assert(err == cudaSuccess); } return semaphore_; } private: const cudaStream_t* stream_; int device_; mutable void* scratch_; mutable unsigned int* semaphore_; }; struct GpuDevice { // The StreamInterface is not owned: the caller is // responsible for its initialization and eventual destruction. explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) { eigen_assert(stream); } explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) { eigen_assert(stream); } // TODO(bsteiner): This is an internal API, we should not expose it. 
EIGEN_STRONG_INLINE const cudaStream_t& stream() const { return stream_->stream(); } EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { return stream_->allocate(num_bytes); } EIGEN_STRONG_INLINE void deallocate(void* buffer) const { stream_->deallocate(buffer); } EIGEN_STRONG_INLINE void* scratchpad() const { return stream_->scratchpad(); } EIGEN_STRONG_INLINE unsigned int* semaphore() const { return stream_->semaphore(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { #ifndef __CUDA_ARCH__ cudaError_t err = cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToDevice, stream_->stream()); EIGEN_UNUSED_VARIABLE(err) assert(err == cudaSuccess); #else eigen_assert(false && "The default device should be used instead to generate kernel code"); #endif } EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { cudaError_t err = cudaMemcpyAsync(dst, src, n, cudaMemcpyHostToDevice, stream_->stream()); EIGEN_UNUSED_VARIABLE(err) assert(err == cudaSuccess); } EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { cudaError_t err = cudaMemcpyAsync(dst, src, n, cudaMemcpyDeviceToHost, stream_->stream()); EIGEN_UNUSED_VARIABLE(err) assert(err == cudaSuccess); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { #ifndef __CUDA_ARCH__ cudaError_t err = cudaMemsetAsync(buffer, c, n, stream_->stream()); EIGEN_UNUSED_VARIABLE(err) assert(err == cudaSuccess); #else eigen_assert(false && "The default device should be used instead to generate kernel code"); #endif } EIGEN_STRONG_INLINE size_t numThreads() const { // FIXME return 32; } EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { // FIXME return 48*1024; } EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { // We won't try to take advantage of the l2 cache for the time being, and // there is no l3 cache on cuda devices. return firstLevelCacheSize(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const { #if defined(__CUDACC__) && !defined(__CUDA_ARCH__) cudaError_t err = cudaStreamSynchronize(stream_->stream()); if (err != cudaSuccess) { std::cerr << "Error detected in CUDA stream: " << cudaGetErrorString(err) << std::endl; assert(err == cudaSuccess); } #else assert(false && "The default device should be used instead to generate kernel code"); #endif } EIGEN_STRONG_INLINE int getNumCudaMultiProcessors() const { return stream_->deviceProperties().multiProcessorCount; } EIGEN_STRONG_INLINE int maxCudaThreadsPerBlock() const { return stream_->deviceProperties().maxThreadsPerBlock; } EIGEN_STRONG_INLINE int maxCudaThreadsPerMultiProcessor() const { return stream_->deviceProperties().maxThreadsPerMultiProcessor; } EIGEN_STRONG_INLINE int sharedMemPerBlock() const { return stream_->deviceProperties().sharedMemPerBlock; } EIGEN_STRONG_INLINE int majorDeviceVersion() const { return stream_->deviceProperties().major; } EIGEN_STRONG_INLINE int minorDeviceVersion() const { return stream_->deviceProperties().minor; } EIGEN_STRONG_INLINE int maxBlocks() const { return max_blocks_; } // This function checks if the CUDA runtime recorded an error for the // underlying stream device. 
inline bool ok() const { #ifdef __CUDACC__ cudaError_t error = cudaStreamQuery(stream_->stream()); return (error == cudaSuccess) || (error == cudaErrorNotReady); #else return false; #endif } private: const StreamInterface* stream_; int max_blocks_; }; #define LAUNCH_CUDA_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \ (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \ assert(cudaGetLastError() == cudaSuccess); // FIXME: Should be device and kernel specific. #ifdef __CUDACC__ static EIGEN_DEVICE_FUNC inline void setCudaSharedMemConfig(cudaSharedMemConfig config) { #ifndef __CUDA_ARCH__ cudaError_t status = cudaDeviceSetSharedMemConfig(config); EIGEN_UNUSED_VARIABLE(status) assert(status == cudaSuccess); #else EIGEN_UNUSED_VARIABLE(config) #endif } #endif } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_CUDA_H
11,080
31.784024
112
h
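For completeness, a sketch of how the stream/device classes in the record above are typically wired together. This is an assumption-level example: it must live in a .cu file compiled with nvcc, EIGEN_USE_GPU must be defined, and d_a/d_b/d_c are illustrative names for device buffers allocated by the caller.

#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>

// Adds two device-resident vectors of length n into d_c on the default stream.
void addOnGpu(float* d_a, float* d_b, float* d_c, int n) {
  Eigen::CudaStreamDevice stream;   // default CUDA stream on the current device
  Eigen::GpuDevice gpu(&stream);    // the StreamInterface is not owned

  Eigen::TensorMap<Eigen::Tensor<float, 1> > a(d_a, n);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > b(d_b, n);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > c(d_c, n);

  c.device(gpu) = a + b;  // launches EigenMetaKernel asynchronously
  gpu.synchronize();      // wait for the kernel before the buffers are reused
}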
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H #define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H namespace Eigen { // Default device for the machine (typically a single cpu core) struct DefaultDevice { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { return internal::aligned_malloc(num_bytes); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const { internal::aligned_free(buffer); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { ::memcpy(dst, src, n); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { memcpy(dst, src, n); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { memcpy(dst, src, n); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { ::memset(buffer, c, n); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const { #ifndef __CUDA_ARCH__ // Running on the host CPU return 1; #else // Running on a CUDA device return 32; #endif } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { #ifndef __CUDA_ARCH__ // Running on the host CPU return l1CacheSize(); #else // Running on a CUDA device, return the amount of shared memory available. return 48*1024; #endif } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { #ifndef __CUDA_ARCH__ // Running single threaded on the host CPU return l3CacheSize(); #else // Running on a CUDA device return firstLevelCacheSize(); #endif } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { #ifndef __CUDA_ARCH__ // Running single threaded on the host CPU // Should return an enum that encodes the ISA supported by the CPU return 1; #else // Running on a CUDA device return __CUDA_ARCH__ / 100; #endif } }; } // namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H
2,474
29.182927
109
h
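A small illustrative sketch of the default single-core device from the record above: plain tensor assignments use it implicitly, and its allocate/memset/deallocate simply wrap the aligned host helpers.

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::DefaultDevice dev;  // single host CPU core

  Eigen::Tensor<float, 1> a(8), b(8), c(8);
  a.setConstant(1.0f);
  b.setConstant(2.0f);
  c.device(dev) = a + b;     // equivalent to the implicit `c = a + b;`

  // The raw memory hooks the evaluators rely on:
  void* buf = dev.allocate(256);  // internal::aligned_malloc under the hood
  dev.memset(buf, 0, 256);
  dev.deallocate(buf);            // internal::aligned_free
  return 0;
}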
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #if defined(EIGEN_USE_THREADS) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H) #define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H namespace Eigen { // Use the SimpleThreadPool by default. We'll switch to the new non blocking // thread pool later. #ifndef EIGEN_USE_SIMPLE_THREAD_POOL template <typename Env> using ThreadPoolTempl = NonBlockingThreadPoolTempl<Env>; typedef NonBlockingThreadPool ThreadPool; #else template <typename Env> using ThreadPoolTempl = SimpleThreadPoolTempl<Env>; typedef SimpleThreadPool ThreadPool; #endif // Barrier is an object that allows one or more threads to wait until // Notify has been called a specified number of times. class Barrier { public: Barrier(unsigned int count) : state_(count << 1), notified_(false) { eigen_assert(((count << 1) >> 1) == count); } ~Barrier() { eigen_assert((state_>>1) == 0); } void Notify() { unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2; if (v != 1) { eigen_assert(((v + 2) & ~1) != 0); return; // either count has not dropped to 0, or waiter is not waiting } std::unique_lock<std::mutex> l(mu_); eigen_assert(!notified_); notified_ = true; cv_.notify_all(); } void Wait() { unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel); if ((v >> 1) == 0) return; std::unique_lock<std::mutex> l(mu_); while (!notified_) { cv_.wait(l); } } private: std::mutex mu_; std::condition_variable cv_; std::atomic<unsigned int> state_; // low bit is waiter flag bool notified_; }; // Notification is an object that allows a user to wait for another // thread to signal a notification that an event has occurred. // // Multiple threads can wait on the same Notification object, // but only one caller must call Notify() on the object. struct Notification : Barrier { Notification() : Barrier(1) {}; }; // Runs an arbitrary function and then calls Notify() on the passed in // Notification. template <typename Function, typename... Args> struct FunctionWrapperWithNotification { static void run(Notification* n, Function f, Args... args) { f(args...); if (n) { n->Notify(); } } }; template <typename Function, typename... Args> struct FunctionWrapperWithBarrier { static void run(Barrier* b, Function f, Args... args) { f(args...); if (b) { b->Notify(); } } }; template <typename SyncType> static EIGEN_STRONG_INLINE void wait_until_ready(SyncType* n) { if (n) { n->Wait(); } } // Build a thread pool device on top of an existing pool of threads. struct ThreadPoolDevice { // The ownership of the thread pool remains with the caller.
ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores) : pool_(pool), num_threads_(num_cores) { } EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { return internal::aligned_malloc(num_bytes); } EIGEN_STRONG_INLINE void deallocate(void* buffer) const { internal::aligned_free(buffer); } EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { ::memcpy(dst, src, n); } EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { memcpy(dst, src, n); } EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { memcpy(dst, src, n); } EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { ::memset(buffer, c, n); } EIGEN_STRONG_INLINE int numThreads() const { return num_threads_; } EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { return l1CacheSize(); } EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { // The l3 cache size is shared between all the cores. return l3CacheSize() / num_threads_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { // Should return an enum that encodes the ISA supported by the CPU return 1; } template <class Function, class... Args> EIGEN_STRONG_INLINE Notification* enqueue(Function&& f, Args&&... args) const { Notification* n = new Notification(); pool_->Schedule(std::bind(&FunctionWrapperWithNotification<Function, Args...>::run, n, f, args...)); return n; } template <class Function, class... Args> EIGEN_STRONG_INLINE void enqueue_with_barrier(Barrier* b, Function&& f, Args&&... args) const { pool_->Schedule(std::bind( &FunctionWrapperWithBarrier<Function, Args...>::run, b, f, args...)); } template <class Function, class... Args> EIGEN_STRONG_INLINE void enqueueNoNotification(Function&& f, Args&&... args) const { pool_->Schedule(std::bind(f, args...)); } // Returns a logical thread index between 0 and pool_->NumThreads() - 1 if // called from one of the threads in pool_. Returns -1 otherwise. EIGEN_STRONG_INLINE int currentThreadId() const { return pool_->CurrentThreadId(); } // parallelFor executes f with [0, n) arguments in parallel and waits for // completion. F accepts a half-open interval [first, last). // Block size is choosen based on the iteration cost and resulting parallel // efficiency. If block_align is not nullptr, it is called to round up the // block size. void parallelFor(Index n, const TensorOpCost& cost, std::function<Index(Index)> block_align, std::function<void(Index, Index)> f) const { typedef TensorCostModel<ThreadPoolDevice> CostModel; if (n <= 1 || numThreads() == 1 || CostModel::numThreads(n, cost, static_cast<int>(numThreads())) == 1) { f(0, n); return; } // Calculate block size based on (1) the iteration cost and (2) parallel // efficiency. We want blocks to be not too small to mitigate // parallelization overheads; not too large to mitigate tail // effect and potential load imbalance and we also want number // of blocks to be evenly dividable across threads. 
double block_size_f = 1.0 / CostModel::taskSize(1, cost); Index block_size = numext::mini(n, numext::maxi<Index>(1, block_size_f)); const Index max_block_size = numext::mini(n, numext::maxi<Index>(1, 2 * block_size_f)); if (block_align) { Index new_block_size = block_align(block_size); eigen_assert(new_block_size >= block_size); block_size = numext::mini(n, new_block_size); } Index block_count = divup(n, block_size); // Calculate parallel efficiency as fraction of total CPU time used for // computations: double max_efficiency = static_cast<double>(block_count) / (divup<int>(block_count, numThreads()) * numThreads()); // Now try to increase block size up to max_block_size as long as it // doesn't decrease parallel efficiency. for (Index prev_block_count = block_count; prev_block_count > 1;) { // This is the next block size that divides size into a smaller number // of blocks than the current block_size. Index coarser_block_size = divup(n, prev_block_count - 1); if (block_align) { Index new_block_size = block_align(coarser_block_size); eigen_assert(new_block_size >= coarser_block_size); coarser_block_size = numext::mini(n, new_block_size); } if (coarser_block_size > max_block_size) { break; // Reached max block size. Stop. } // Recalculate parallel efficiency. const Index coarser_block_count = divup(n, coarser_block_size); eigen_assert(coarser_block_count < prev_block_count); prev_block_count = coarser_block_count; const double coarser_efficiency = static_cast<double>(coarser_block_count) / (divup<int>(coarser_block_count, numThreads()) * numThreads()); if (coarser_efficiency + 0.01 >= max_efficiency) { // Taking it. block_size = coarser_block_size; block_count = coarser_block_count; if (max_efficiency < coarser_efficiency) { max_efficiency = coarser_efficiency; } } } // Recursively divide size into halves until we reach block_size. // Division code rounds mid to block_size, so we are guaranteed to get // block_count leaves that do actual computations. Barrier barrier(static_cast<unsigned int>(block_count)); std::function<void(Index, Index)> handleRange; handleRange = [=, &handleRange, &barrier, &f](Index first, Index last) { if (last - first <= block_size) { // Single block or less, execute directly. f(first, last); barrier.Notify(); return; } // Split into halves and submit to the pool. Index mid = first + divup((last - first) / 2, block_size) * block_size; pool_->Schedule([=, &handleRange]() { handleRange(mid, last); }); pool_->Schedule([=, &handleRange]() { handleRange(first, mid); }); }; handleRange(0, n); barrier.Wait(); } // Convenience wrapper for parallelFor that does not align blocks. void parallelFor(Index n, const TensorOpCost& cost, std::function<void(Index, Index)> f) const { parallelFor(n, cost, nullptr, std::move(f)); } private: ThreadPoolInterface* pool_; int num_threads_; }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H
9,793
33.978571
104
h
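The block-sizing logic of parallelFor in the record above can be exercised directly. A hedged sketch (assumes EIGEN_USE_THREADS and C++11): the callback receives half-open [first, last) ranges, and the call blocks on the internal Barrier until every block has run.

#define EIGEN_USE_THREADS
#include <vector>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::ThreadPool pool(4);
  Eigen::ThreadPoolDevice dev(&pool, 4);

  std::vector<float> data(1 << 20, 1.0f);

  // Rough per-element cost: one 4-byte load, one 4-byte store, one add.
  Eigen::TensorOpCost cost(/*bytes_loaded=*/4, /*bytes_stored=*/4,
                           /*compute_cycles=*/1);

  // parallelFor derives a block size from the cost, splits [0, n) recursively,
  // and waits until all blocks have been processed.
  dev.parallelFor(static_cast<Eigen::Index>(data.size()), cost,
                  [&data](Eigen::Index first, Eigen::Index last) {
                    for (Eigen::Index i = first; i < last; ++i) {
                      data[i] += 1.0f;
                    }
                  });
  return 0;
}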
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H #define EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H namespace Eigen { /** \internal * * \class TensorDimensionList * \ingroup CXX11_Tensor_Module * * \brief Special case of tensor index list used to list all the dimensions of a tensor of rank n. * * \sa Tensor */ template <typename Index, std::size_t Rank> struct DimensionList { EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const Index operator[] (const Index i) const { return i; } }; namespace internal { template<typename Index, std::size_t Rank> struct array_size<DimensionList<Index, Rank> > { static const size_t value = Rank; }; template<typename Index, std::size_t Rank> struct array_size<const DimensionList<Index, Rank> > { static const size_t value = Rank; }; template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(DimensionList<Index, Rank>&) { return n; } template<DenseIndex n, typename Index, std::size_t Rank> const Index array_get(const DimensionList<Index, Rank>&) { return n; } #if EIGEN_HAS_CONSTEXPR template <typename Index, std::size_t Rank> struct index_known_statically_impl<DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) { return true; } }; template <typename Index, std::size_t Rank> struct index_known_statically_impl<const DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) { return true; } }; template <typename Index, std::size_t Rank> struct all_indices_known_statically_impl<DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run() { return true; } }; template <typename Index, std::size_t Rank> struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run() { return true; } }; template <typename Index, std::size_t Rank> struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run() { return true; } }; template <typename Index, std::size_t Rank> struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run() { return true; } }; template <typename Index, std::size_t Rank> struct index_statically_eq_impl<DimensionList<Index, Rank> > { static constexpr bool run(const DenseIndex i, const DenseIndex value) { return i == value; } }; template <typename Index, std::size_t Rank> struct index_statically_eq_impl<const DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { return i == value; } }; template <typename Index, std::size_t Rank> struct index_statically_ne_impl<DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { return i != value; } }; template <typename Index, std::size_t Rank> struct index_statically_ne_impl<const DimensionList<Index, Rank> > { static constexpr bool run(const DenseIndex i, const DenseIndex value) { return i != value; } }; template <typename Index, std::size_t Rank> struct index_statically_gt_impl<DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool 
run(const DenseIndex i, const DenseIndex value) { return i > value; } }; template <typename Index, std::size_t Rank> struct index_statically_gt_impl<const DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { return i > value; } }; template <typename Index, std::size_t Rank> struct index_statically_lt_impl<DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { return i < value; } }; template <typename Index, std::size_t Rank> struct index_statically_lt_impl<const DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { return i < value; } }; #else template <typename Index, std::size_t Rank> struct index_known_statically_impl<DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) { return true; } }; template <typename Index, std::size_t Rank> struct index_known_statically_impl<const DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) { return true; } }; template <typename Index, std::size_t Rank> struct all_indices_known_statically_impl<DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() { return true; } }; template <typename Index, std::size_t Rank> struct all_indices_known_statically_impl<const DimensionList<Index, Rank> > { EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() { return true; } }; template <typename Index, std::size_t Rank> struct indices_statically_known_to_increase_impl<DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { return true; } }; template <typename Index, std::size_t Rank> struct indices_statically_known_to_increase_impl<const DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { return true; } }; template <typename Index, std::size_t Rank> struct index_statically_eq_impl<DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { return false; } }; template <typename Index, std::size_t Rank> struct index_statically_eq_impl<const DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { return false; } }; template <typename Index, std::size_t Rank> struct index_statically_ne_impl<DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex){ return false; } }; template <typename Index, std::size_t Rank> struct index_statically_ne_impl<const DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { return false; } }; template <typename Index, std::size_t Rank> struct index_statically_gt_impl<DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { return false; } }; template <typename Index, std::size_t Rank> struct index_statically_gt_impl<const DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { return false; } }; template <typename Index, std::size_t Rank> struct index_statically_lt_impl<DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { return false; } }; template <typename Index, std::size_t Rank> struct 
index_statically_lt_impl<const DimensionList<Index, Rank> > { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { return false; } }; #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H
7,674
31.383966
115
h
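DimensionList from the record above is an internal helper, but its behavior is easy to demonstrate (illustrative sketch): it acts as the constant index list {0, 1, ..., Rank-1}, which the reduction code uses when every dimension of a rank-n tensor is reduced.

#include <iostream>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // Behaves like the array {0, 1, 2, 3} without storing any data.
  Eigen::DimensionList<Eigen::DenseIndex, 4> all_dims;
  for (Eigen::DenseIndex i = 0; i < 4; ++i) {
    std::cout << all_dims[i] << ' ';  // prints: 0 1 2 3
  }
  std::cout << '\n';

  // The rank is visible statically, as used by the reduction machinery.
  std::cout << Eigen::internal::array_size<
                   Eigen::DimensionList<Eigen::DenseIndex, 4> >::value
            << '\n';  // prints: 4
  return 0;
}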
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H #define EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H namespace Eigen { /** \class TensorExecutor * \ingroup CXX11_Tensor_Module * * \brief The tensor executor class. * * This class is responsible for launch the evaluation of the expression on * the specified computing device. */ namespace internal { // Default strategy: the expression is evaluated with a single cpu thread. template<typename Expression, typename Device, bool Vectorizable> class TensorExecutor { public: typedef typename Expression::Index Index; EIGEN_DEVICE_FUNC static inline void run(const Expression& expr, const Device& device = Device()) { TensorEvaluator<Expression, Device> evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); if (needs_assign) { const Index size = array_prod(evaluator.dimensions()); for (Index i = 0; i < size; ++i) { evaluator.evalScalar(i); } } evaluator.cleanup(); } }; template<typename Expression> class TensorExecutor<Expression, DefaultDevice, true> { public: typedef typename Expression::Index Index; EIGEN_DEVICE_FUNC static inline void run(const Expression& expr, const DefaultDevice& device = DefaultDevice()) { TensorEvaluator<Expression, DefaultDevice> evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); if (needs_assign) { const Index size = array_prod(evaluator.dimensions()); const int PacketSize = unpacket_traits<typename TensorEvaluator<Expression, DefaultDevice>::PacketReturnType>::size; // Give the compiler a strong hint to unroll the loop. But don't insist // on unrolling, because if the function is expensive the compiler should not // unroll the loop at the expense of inlining. const Index UnrolledSize = (size / (4 * PacketSize)) * 4 * PacketSize; for (Index i = 0; i < UnrolledSize; i += 4*PacketSize) { for (Index j = 0; j < 4; j++) { evaluator.evalPacket(i + j * PacketSize); } } const Index VectorizedSize = (size / PacketSize) * PacketSize; for (Index i = UnrolledSize; i < VectorizedSize; i += PacketSize) { evaluator.evalPacket(i); } for (Index i = VectorizedSize; i < size; ++i) { evaluator.evalScalar(i); } } evaluator.cleanup(); } }; // Multicore strategy: the index space is partitioned and each partition is executed on a single core #ifdef EIGEN_USE_THREADS template <typename Evaluator, typename Index, bool Vectorizable> struct EvalRange { static void run(Evaluator* evaluator_in, const Index first, const Index last) { Evaluator evaluator = *evaluator_in; eigen_assert(last >= first); for (Index i = first; i < last; ++i) { evaluator.evalScalar(i); } } static Index alignBlockSize(Index size) { return size; } }; template <typename Evaluator, typename Index> struct EvalRange<Evaluator, Index, true> { static const int PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size; static void run(Evaluator* evaluator_in, const Index first, const Index last) { Evaluator evaluator = *evaluator_in; eigen_assert(last >= first); Index i = first; if (last - first >= PacketSize) { eigen_assert(first % PacketSize == 0); Index last_chunk_offset = last - 4 * PacketSize; // Give the compiler a strong hint to unroll the loop. 
But don't insist // on unrolling, because if the function is expensive the compiler should not // unroll the loop at the expense of inlining. for (; i <= last_chunk_offset; i += 4*PacketSize) { for (Index j = 0; j < 4; j++) { evaluator.evalPacket(i + j * PacketSize); } } last_chunk_offset = last - PacketSize; for (; i <= last_chunk_offset; i += PacketSize) { evaluator.evalPacket(i); } } for (; i < last; ++i) { evaluator.evalScalar(i); } } static Index alignBlockSize(Index size) { // Align block size to packet size and account for unrolling in run above. if (size >= 16 * PacketSize) { return (size + 4 * PacketSize - 1) & ~(4 * PacketSize - 1); } // Aligning to 4 * PacketSize would increase block size by more than 25%. return (size + PacketSize - 1) & ~(PacketSize - 1); } }; template <typename Expression, bool Vectorizable> class TensorExecutor<Expression, ThreadPoolDevice, Vectorizable> { public: typedef typename Expression::Index Index; static inline void run(const Expression& expr, const ThreadPoolDevice& device) { typedef TensorEvaluator<Expression, ThreadPoolDevice> Evaluator; Evaluator evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); if (needs_assign) { const Index size = array_prod(evaluator.dimensions()); #if !defined(EIGEN_USE_SIMPLE_THREAD_POOL) device.parallelFor(size, evaluator.costPerCoeff(Vectorizable), EvalRange<Evaluator, Index, Vectorizable>::alignBlockSize, [&evaluator](Index first, Index last) { EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, first, last); }); #else size_t num_threads = device.numThreads(); if (num_threads > 1) { num_threads = TensorCostModel<ThreadPoolDevice>::numThreads( size, evaluator.costPerCoeff(Vectorizable), num_threads); } if (num_threads == 1) { EvalRange<Evaluator, Index, Vectorizable>::run(&evaluator, 0, size); } else { const Index PacketSize = Vectorizable ? unpacket_traits<typename Evaluator::PacketReturnType>::size : 1; Index blocksz = std::ceil<Index>(static_cast<float>(size)/num_threads) + PacketSize - 1; const Index blocksize = numext::maxi<Index>(PacketSize, (blocksz - (blocksz % PacketSize))); const Index numblocks = size / blocksize; Barrier barrier(numblocks); for (int i = 0; i < numblocks; ++i) { device.enqueue_with_barrier( &barrier, &EvalRange<Evaluator, Index, Vectorizable>::run, &evaluator, i * blocksize, (i + 1) * blocksize); } if (numblocks * blocksize < size) { EvalRange<Evaluator, Index, Vectorizable>::run( &evaluator, numblocks * blocksize, size); } barrier.Wait(); } #endif // defined(!EIGEN_USE_SIMPLE_THREAD_POOL) } evaluator.cleanup(); } }; #endif // EIGEN_USE_THREADS // GPU: the evaluation of the expression is offloaded to a GPU. 
#if defined(EIGEN_USE_GPU) template <typename Expression, bool Vectorizable> class TensorExecutor<Expression, GpuDevice, Vectorizable> { public: typedef typename Expression::Index Index; static void run(const Expression& expr, const GpuDevice& device); }; #if defined(__CUDACC__) template <typename Evaluator, typename Index, bool Vectorizable> struct EigenMetaKernelEval { static __device__ EIGEN_ALWAYS_INLINE void run(Evaluator& eval, Index first, Index last, Index step_size) { for (Index i = first; i < last; i += step_size) { eval.evalScalar(i); } } }; template <typename Evaluator, typename Index> struct EigenMetaKernelEval<Evaluator, Index, true> { static __device__ EIGEN_ALWAYS_INLINE void run(Evaluator& eval, Index first, Index last, Index step_size) { const Index PacketSize = unpacket_traits<typename Evaluator::PacketReturnType>::size; const Index vectorized_size = (last / PacketSize) * PacketSize; const Index vectorized_step_size = step_size * PacketSize; // Use the vector path for (Index i = first * PacketSize; i < vectorized_size; i += vectorized_step_size) { eval.evalPacket(i); } for (Index i = vectorized_size + first; i < last; i += step_size) { eval.evalScalar(i); } } }; template <typename Evaluator, typename Index> __global__ void __launch_bounds__(1024) EigenMetaKernel(Evaluator eval, Index size) { const Index first_index = blockIdx.x * blockDim.x + threadIdx.x; const Index step_size = blockDim.x * gridDim.x; const bool vectorizable = Evaluator::PacketAccess & Evaluator::IsAligned; EigenMetaKernelEval<Evaluator, Index, vectorizable>::run(eval, first_index, size, step_size); } /*static*/ template <typename Expression, bool Vectorizable> inline void TensorExecutor<Expression, GpuDevice, Vectorizable>::run( const Expression& expr, const GpuDevice& device) { TensorEvaluator<Expression, GpuDevice> evaluator(expr, device); const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); if (needs_assign) { const int block_size = device.maxCudaThreadsPerBlock(); const int max_blocks = device.getNumCudaMultiProcessors() * device.maxCudaThreadsPerMultiProcessor() / block_size; const Index size = array_prod(evaluator.dimensions()); // Create a least one block to ensure we won't crash when tensorflow calls with tensors of size 0. const int num_blocks = numext::maxi<int>(numext::mini<int>(max_blocks, divup<int>(size, block_size)), 1); LAUNCH_CUDA_KERNEL( (EigenMetaKernel<TensorEvaluator<Expression, GpuDevice>, Index>), num_blocks, block_size, 0, device, evaluator, size); } evaluator.cleanup(); } #endif // __CUDACC__ #endif // EIGEN_USE_GPU // SYCL Executor policy #ifdef EIGEN_USE_SYCL template <typename Expression, bool Vectorizable> class TensorExecutor<Expression, SyclDevice, Vectorizable> { public: static inline void run(const Expression &expr, const SyclDevice &device) { // call TensorSYCL module TensorSycl::run(expr, device); } }; #endif } // end namespace internal } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H
10,248
34.463668
122
h
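What the executor specializations in the record above receive in practice: a hedged sketch that expands a plain tensor assignment into a TensorAssignOp handed to TensorExecutor, mirroring the pattern used by TensorDevice::operator= earlier in this file set. The types are spelled out only for illustration.

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 1> a(16), b(16);
  a.setRandom();

  // Roughly what `b = a * 2.0f;` does under the hood.
  Eigen::DefaultDevice dev;
  auto expr = a * 2.0f;  // unevaluated expression tree
  typedef Eigen::TensorAssignOp<Eigen::Tensor<float, 1>, const decltype(expr)>
      Assign;
  Assign assign(b, expr);
  // The Vectorizable template argument defaults via internal::IsVectorizable.
  Eigen::internal::TensorExecutor<const Assign, Eigen::DefaultDevice>::run(
      assign, dev);
  return 0;
}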
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H #define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H namespace Eigen { /** \class TensorForcedEval * \ingroup CXX11_Tensor_Module * * \brief Tensor reshaping class. * * */ /// template <class> class MakePointer_ is added to convert the host pointer to the device pointer. /// It is added due to the fact that for our device compiler T* is not allowed. /// If we wanted to use the same Evaluator functions we have to convert that type to our pointer T. /// This is done through our MakePointer_ class. By default the Type in the MakePointer_<T> is T* . /// Therefore, by adding the default value, we managed to convert the type and it does not break any /// existing code as its default value is T*. namespace internal { template<typename XprType, template <class> class MakePointer_> struct traits<TensorForcedEvalOp<XprType, MakePointer_> > { // Type promotion to handle the case where the types of the lhs and the rhs are different. typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename traits<XprType>::StorageKind StorageKind; typedef typename traits<XprType>::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; enum { Flags = 0 }; template <class T> struct MakePointer { // Intermediate typedef to workaround MSVC issue. 
typedef MakePointer_<T> MakePointerT; typedef typename MakePointerT::Type Type; }; }; template<typename XprType, template <class> class MakePointer_> struct eval<TensorForcedEvalOp<XprType, MakePointer_>, Eigen::Dense> { typedef const TensorForcedEvalOp<XprType, MakePointer_>& type; }; template<typename XprType, template <class> class MakePointer_> struct nested<TensorForcedEvalOp<XprType, MakePointer_>, 1, typename eval<TensorForcedEvalOp<XprType, MakePointer_> >::type> { typedef TensorForcedEvalOp<XprType, MakePointer_> type; }; } // end namespace internal template<typename XprType, template <class> class MakePointer_> class TensorForcedEvalOp : public TensorBase<TensorForcedEvalOp<XprType, MakePointer_>, ReadOnlyAccessors> { public: typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType; typedef typename Eigen::internal::nested<TensorForcedEvalOp>::type Nested; typedef typename Eigen::internal::traits<TensorForcedEvalOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr) : m_xpr(expr) {} EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } protected: typename XprType::Nested m_xpr; }; template<typename ArgType, typename Device, template <class> class MakePointer_> struct TensorEvaluator<const TensorForcedEvalOp<ArgType, MakePointer_>, Device> { typedef TensorForcedEvalOp<ArgType, MakePointer_> XprType; typedef typename ArgType::Scalar Scalar; typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions; typedef typename XprType::Index Index; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = true, PacketAccess = (PacketSize > 1), Layout = TensorEvaluator<ArgType, Device>::Layout, RawAccess = true }; EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) /// op_ is used for sycl : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL) { } EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) { const Index numValues = internal::array_prod(m_impl.dimensions()); m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType)); // Should initialize the memory in case we're dealing with non POD types. 
if (NumTraits<CoeffReturnType>::RequireInitialization) { for (Index i = 0; i < numValues; ++i) { new(m_buffer+i) CoeffReturnType(); } } typedef TensorEvalToOp< const typename internal::remove_const<ArgType>::type > EvalTo; EvalTo evalToTmp(m_buffer, m_op); const bool PacketAccess = internal::IsVectorizable<Device, const ArgType>::value; internal::TensorExecutor<const EvalTo, typename internal::remove_const<Device>::type, PacketAccess>::run(evalToTmp, m_device); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_device.deallocate(m_buffer); m_buffer = NULL; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_buffer[index]; } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); } EIGEN_DEVICE_FUNC typename MakePointer<Scalar>::Type data() const { return m_buffer; } /// required by sycl in order to extract the sycl accessor const TensorEvaluator<ArgType, Device>& impl() { return m_impl; } /// used by sycl in order to build the sycl buffer const Device& device() const{return m_device;} private: TensorEvaluator<ArgType, Device> m_impl; const ArgType m_op; const Device& m_device; typename MakePointer<CoeffReturnType>::Type m_buffer; }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H
6,508
37.744048
130
h
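A usage sketch (illustrative): the user-facing way to create the TensorForcedEvalOp from the record above is .eval() on a tensor expression, which materializes the sub-expression into a temporary buffer handled by the evaluator shown there.

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 2> a(128, 128), b(128, 128);
  a.setRandom();
  b.setRandom();

  // .eval() forces (a + b) into a temporary buffer before the outer
  // expression runs, instead of fusing it into the enclosing evaluation.
  Eigen::Tensor<float, 2> c = (a + b).eval() * 0.5f;

  (void)c;  // silence unused-variable warnings in this sketch
  return 0;
}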
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H #define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H namespace Eigen { // MakePointer class is used as a container of the adress space of the pointer // on the host and on the device. From the host side it generates the T* pointer // and when EIGEN_USE_SYCL is used it construct a buffer with a map_allocator to // T* m_data on the host. It is always called on the device. // Specialisation of MakePointer class for creating the sycl buffer with // map_allocator. template<typename T> struct MakePointer { typedef T* Type; }; template<typename PlainObjectType, int Options_ = Unaligned, template <class> class MakePointer_ = MakePointer> class TensorMap; template<typename Scalar_, int NumIndices_, int Options_ = 0, typename IndexType = DenseIndex> class Tensor; template<typename Scalar_, typename Dimensions, int Options_ = 0, typename IndexType = DenseIndex> class TensorFixedSize; template<typename PlainObjectType> class TensorRef; template<typename Derived, int AccessLevel> class TensorBase; template<typename NullaryOp, typename PlainObjectType> class TensorCwiseNullaryOp; template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp; template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp; template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType> class TensorCwiseTernaryOp; template<typename IfXprType, typename ThenXprType, typename ElseXprType> class TensorSelectOp; template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ = MakePointer > class TensorReductionOp; template<typename XprType> class TensorIndexTupleOp; template<typename ReduceOp, typename Dims, typename XprType> class TensorTupleReducerOp; template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp; template<typename Dimensions, typename LeftXprType, typename RightXprType> class TensorContractionOp; template<typename TargetType, typename XprType> class TensorConversionOp; template<typename Dimensions, typename InputXprType, typename KernelXprType> class TensorConvolutionOp; template<typename FFT, typename XprType, int FFTDataType, int FFTDirection> class TensorFFTOp; template<typename PatchDim, typename XprType> class TensorPatchOp; template<DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorImagePatchOp; template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorVolumePatchOp; template<typename Broadcast, typename XprType> class TensorBroadcastingOp; template<DenseIndex DimId, typename XprType> class TensorChippingOp; template<typename NewDimensions, typename XprType> class TensorReshapingOp; template<typename XprType> class TensorLayoutSwapOp; template<typename StartIndices, typename Sizes, typename XprType> class TensorSlicingOp; template<typename ReverseDimensions, typename XprType> class TensorReverseOp; template<typename PaddingDimensions, typename XprType> class TensorPaddingOp; template<typename Shuffle, typename XprType> class TensorShufflingOp; template<typename Strides, typename XprType> class TensorStridingOp; 
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType> class TensorStridingSlicingOp; template<typename Strides, typename XprType> class TensorInflationOp; template<typename Generator, typename XprType> class TensorGeneratorOp; template<typename LeftXprType, typename RightXprType> class TensorAssignOp; template<typename Op, typename XprType> class TensorScanOp; template<typename CustomUnaryFunc, typename XprType> class TensorCustomUnaryOp; template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType> class TensorCustomBinaryOp; template<typename XprType, template <class> class MakePointer_ = MakePointer> class TensorEvalToOp; template<typename XprType, template <class> class MakePointer_ = MakePointer> class TensorForcedEvalOp; template<typename ExpressionType, typename DeviceType> class TensorDevice; template<typename Derived, typename Device> struct TensorEvaluator; struct DefaultDevice; struct ThreadPoolDevice; struct GpuDevice; struct SyclDevice; enum FFTResultType { RealPart = 0, ImagPart = 1, BothParts = 2 }; enum FFTDirection { FFT_FORWARD = 0, FFT_REVERSE = 1 }; namespace internal { template <typename Device, typename Expression> struct IsVectorizable { static const bool value = TensorEvaluator<Expression, Device>::PacketAccess; }; template <typename Expression> struct IsVectorizable<GpuDevice, Expression> { static const bool value = TensorEvaluator<Expression, GpuDevice>::PacketAccess && TensorEvaluator<Expression, GpuDevice>::IsAligned; }; template <typename Expression, typename Device, bool Vectorizable = IsVectorizable<Device, Expression>::value> class TensorExecutor; } // end namespace internal } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
5,412
48.209091
131
h
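A tiny compile-time check (illustrative) of the MakePointer default declared in the record above: without EIGEN_USE_SYCL it is nothing more than a raw host pointer.

#include <type_traits>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  static_assert(std::is_same<Eigen::MakePointer<float>::Type, float*>::value,
                "MakePointer<T>::Type defaults to T*");
  return 0;
}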
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H #define EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H namespace Eigen { namespace internal { /** \internal * \brief Template functor to compute the modulo between an array and a scalar. */ template <typename Scalar> struct scalar_mod_op { EIGEN_DEVICE_FUNC scalar_mod_op(const Scalar& divisor) : m_divisor(divisor) {} EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a % m_divisor; } const Scalar m_divisor; }; template <typename Scalar> struct functor_traits<scalar_mod_op<Scalar> > { enum { Cost = scalar_div_cost<Scalar,false>::value, PacketAccess = false }; }; /** \internal * \brief Template functor to compute the modulo between 2 arrays. */ template <typename Scalar> struct scalar_mod2_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_mod2_op); EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; } }; template <typename Scalar> struct functor_traits<scalar_mod2_op<Scalar> > { enum { Cost = scalar_div_cost<Scalar,false>::value, PacketAccess = false }; }; template <typename Scalar> struct scalar_fmod_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_fmod_op); EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const Scalar& a, const Scalar& b) const { return numext::fmod(a, b); } }; template <typename Scalar> struct functor_traits<scalar_fmod_op<Scalar> > { enum { Cost = 13, // Reciprocal throughput of FPREM on Haswell. PacketAccess = false }; }; /** \internal * \brief Template functor to compute the sigmoid of a scalar * \sa class CwiseUnaryOp, ArrayBase::sigmoid() */ template <typename T> struct scalar_sigmoid_op { EIGEN_EMPTY_STRUCT_CTOR(scalar_sigmoid_op) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(const T& x) const { const T one = T(1); return one / (one + numext::exp(-x)); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { const Packet one = pset1<Packet>(T(1)); return pdiv(one, padd(one, pexp(pnegate(x)))); } }; template <typename T> struct functor_traits<scalar_sigmoid_op<T> > { enum { Cost = NumTraits<T>::AddCost * 2 + NumTraits<T>::MulCost * 6, PacketAccess = packet_traits<T>::HasAdd && packet_traits<T>::HasDiv && packet_traits<T>::HasNegate && packet_traits<T>::HasExp }; }; template<typename Reducer, typename Device> struct reducer_traits { enum { Cost = 1, PacketAccess = false }; }; // Standard reduction functors template <typename T> struct SumReducer { static const bool PacketAccess = packet_traits<T>::HasAdd; static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { internal::scalar_sum_op<T> sum_op; *accum = sum_op(*accum, t); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { (*accum) = padd<Packet>(*accum, p); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { internal::scalar_cast_op<int, T> conv; return conv(0); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { return pset1<Packet>(initialize()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { return accum; } 
template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { return vaccum; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { internal::scalar_sum_op<T> sum_op; return sum_op(saccum, predux(vaccum)); } }; template <typename T, typename Device> struct reducer_traits<SumReducer<T>, Device> { enum { Cost = NumTraits<T>::AddCost, PacketAccess = PacketType<T, Device>::HasAdd }; }; template <typename T> struct MeanReducer { static const bool PacketAccess = packet_traits<T>::HasAdd && !NumTraits<T>::IsInteger; static const bool IsStateful = true; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE MeanReducer() : scalarCount_(0), packetCount_(0) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) { internal::scalar_sum_op<T> sum_op; *accum = sum_op(*accum, t); scalarCount_++; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) { (*accum) = padd<Packet>(*accum, p); packetCount_++; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { internal::scalar_cast_op<int, T> conv; return conv(0); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { return pset1<Packet>(initialize()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { return accum / scalarCount_; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { return pdiv(vaccum, pset1<Packet>(packetCount_)); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { internal::scalar_sum_op<T> sum_op; return sum_op(saccum, predux(vaccum)) / (scalarCount_ + packetCount_ * unpacket_traits<Packet>::size); } protected: DenseIndex scalarCount_; DenseIndex packetCount_; }; template <typename T, typename Device> struct reducer_traits<MeanReducer<T>, Device> { enum { Cost = NumTraits<T>::AddCost, PacketAccess = PacketType<T, Device>::HasAdd }; }; template <typename T, bool IsMax = true, bool IsInteger = true> struct MinMaxBottomValue { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { return Eigen::NumTraits<T>::lowest(); } }; template <typename T> struct MinMaxBottomValue<T, true, false> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { return -Eigen::NumTraits<T>::infinity(); } }; template <typename T> struct MinMaxBottomValue<T, false, true> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { return Eigen::NumTraits<T>::highest(); } }; template <typename T> struct MinMaxBottomValue<T, false, false> { EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { return Eigen::NumTraits<T>::infinity(); } }; template <typename T> struct MaxReducer { static const bool PacketAccess = packet_traits<T>::HasMax; static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { if (t > *accum) { *accum = t; } } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { (*accum) = pmax<Packet>(*accum, p); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { return MinMaxBottomValue<T, true, Eigen::NumTraits<T>::IsInteger>::bottom_value(); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { return pset1<Packet>(initialize()); } 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { return accum; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { return vaccum; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { return numext::maxi(saccum, predux_max(vaccum)); } }; template <typename T, typename Device> struct reducer_traits<MaxReducer<T>, Device> { enum { Cost = NumTraits<T>::AddCost, PacketAccess = PacketType<T, Device>::HasMax }; }; template <typename T> struct MinReducer { static const bool PacketAccess = packet_traits<T>::HasMin; static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { if (t < *accum) { *accum = t; } } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { (*accum) = pmin<Packet>(*accum, p); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { return MinMaxBottomValue<T, false, Eigen::NumTraits<T>::IsInteger>::bottom_value(); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { return pset1<Packet>(initialize()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { return accum; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { return vaccum; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { return numext::mini(saccum, predux_min(vaccum)); } }; template <typename T, typename Device> struct reducer_traits<MinReducer<T>, Device> { enum { Cost = NumTraits<T>::AddCost, PacketAccess = PacketType<T, Device>::HasMin }; }; template <typename T> struct ProdReducer { static const bool PacketAccess = packet_traits<T>::HasMul; static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { internal::scalar_product_op<T> prod_op; (*accum) = prod_op(*accum, t); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { (*accum) = pmul<Packet>(*accum, p); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { internal::scalar_cast_op<int, T> conv; return conv(1); } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { return pset1<Packet>(initialize()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { return accum; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { return vaccum; } template <typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { internal::scalar_product_op<T> prod_op; return prod_op(saccum, predux_mul(vaccum)); } }; template <typename T, typename Device> struct reducer_traits<ProdReducer<T>, Device> { enum { Cost = NumTraits<T>::MulCost, PacketAccess = PacketType<T, Device>::HasMul }; }; struct AndReducer { static const bool PacketAccess = false; static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const { *accum = *accum && t; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { return accum; } }; template 
<typename Device> struct reducer_traits<AndReducer, Device> { enum { Cost = 1, PacketAccess = false }; }; struct OrReducer { static const bool PacketAccess = false; static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const { *accum = *accum || t; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { return false; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { return accum; } }; template <typename Device> struct reducer_traits<OrReducer, Device> { enum { Cost = 1, PacketAccess = false }; }; // Argmin/Argmax reducers template <typename T> struct ArgMaxTupleReducer { static const bool PacketAccess = false; static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { if (t.second > accum->second) { *accum = t; } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { return T(0, NumTraits<typename T::second_type>::lowest()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const { return accum; } }; template <typename T, typename Device> struct reducer_traits<ArgMaxTupleReducer<T>, Device> { enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; }; template <typename T> struct ArgMinTupleReducer { static const bool PacketAccess = false; static const bool IsStateful = false; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const { if (t.second < accum->second) { *accum = t; } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { return T(0, NumTraits<typename T::second_type>::highest()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const { return accum; } }; template <typename T, typename Device> struct reducer_traits<ArgMinTupleReducer<T>, Device> { enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; }; template <typename T, typename Index, size_t NumDims> class GaussianGenerator { public: static const bool PacketAccess = false; EIGEN_DEVICE_FUNC GaussianGenerator(const array<T, NumDims>& means, const array<T, NumDims>& std_devs) : m_means(means) { for (size_t i = 0; i < NumDims; ++i) { m_two_sigmas[i] = std_devs[i] * std_devs[i] * 2; } } EIGEN_DEVICE_FUNC T operator()(const array<Index, NumDims>& coordinates) const { T tmp = T(0); for (size_t i = 0; i < NumDims; ++i) { T offset = coordinates[i] - m_means[i]; tmp += offset * offset / m_two_sigmas[i]; } return numext::exp(-tmp); } private: array<T, NumDims> m_means; array<T, NumDims> m_two_sigmas; }; template <typename T, typename Index, size_t NumDims> struct functor_traits<GaussianGenerator<T, Index, NumDims> > { enum { Cost = NumDims * (2 * NumTraits<T>::AddCost + NumTraits<T>::MulCost + functor_traits<scalar_quotient_op<T, T> >::Cost) + functor_traits<scalar_exp_op<T> >::Cost, PacketAccess = GaussianGenerator<T, Index, NumDims>::PacketAccess }; }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H
14,625
28.84898
106
h
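The reduction functors defined in TensorFunctors.h above all share one protocol: initialize(), repeated reduce() calls, then finalize(). A small sketch that drives internal::SumReducer and internal::MaxReducer by hand, using only the member functions shown in that header; in normal code the same functors are reached indirectly through reductions such as sum() or maximum().

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  const float data[4] = {3.f, -1.f, 7.f, 2.f};

  // Scalar path of the reducer protocol: initialize, fold, finalize.
  Eigen::internal::SumReducer<float> sum_reducer;
  Eigen::internal::MaxReducer<float> max_reducer;
  float sum_acc = sum_reducer.initialize();  // 0
  float max_acc = max_reducer.initialize();  // -infinity for float
  for (float v : data) {
    sum_reducer.reduce(v, &sum_acc);
    max_reducer.reduce(v, &max_acc);
  }
  std::cout << sum_reducer.finalize(sum_acc) << " "    // 11
            << max_reducer.finalize(max_acc) << "\n";  // 7

  // The public reduction API is backed by the same functors.
  Eigen::Tensor<float, 1> t(4);
  t.setValues({3.f, -1.f, 7.f, 2.f});
  Eigen::Tensor<float, 0> s = t.sum();
  std::cout << s() << "\n";  // 11
  return 0;
}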
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H #define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H namespace Eigen { /** \class TensorGenerator * \ingroup CXX11_Tensor_Module * * \brief Tensor generator class. * * */ namespace internal { template<typename Generator, typename XprType> struct traits<TensorGeneratorOp<Generator, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; }; template<typename Generator, typename XprType> struct eval<TensorGeneratorOp<Generator, XprType>, Eigen::Dense> { typedef const TensorGeneratorOp<Generator, XprType>& type; }; template<typename Generator, typename XprType> struct nested<TensorGeneratorOp<Generator, XprType>, 1, typename eval<TensorGeneratorOp<Generator, XprType> >::type> { typedef TensorGeneratorOp<Generator, XprType> type; }; } // end namespace internal template<typename Generator, typename XprType> class TensorGeneratorOp : public TensorBase<TensorGeneratorOp<Generator, XprType>, ReadOnlyAccessors> { public: typedef typename Eigen::internal::traits<TensorGeneratorOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorGeneratorOp>::type Nested; typedef typename Eigen::internal::traits<TensorGeneratorOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorGeneratorOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator) : m_xpr(expr), m_generator(generator) {} EIGEN_DEVICE_FUNC const Generator& generator() const { return m_generator; } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } protected: typename XprType::Nested m_xpr; const Generator m_generator; }; // Eval as rvalue template<typename Generator, typename ArgType, typename Device> struct TensorEvaluator<const TensorGeneratorOp<Generator, ArgType>, Device> { typedef TensorGeneratorOp<Generator, ArgType> XprType; typedef typename XprType::Index Index; typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions; static const int NumDims = internal::array_size<Dimensions>::value; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; enum { IsAligned = false, PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1), BlockAccess = false, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, // to be implemented RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_generator(op.generator()) { TensorEvaluator<ArgType, Device> impl(op.expression(), device); m_dimensions = impl.dimensions(); 
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { m_strides[0] = 1; for (int i = 1; i < NumDims; ++i) { m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1]; } } else { m_strides[NumDims - 1] = 1; for (int i = NumDims - 2; i >= 0; --i) { m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1]; } } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { array<Index, NumDims> coords; extract_coordinates(index, coords); return m_generator(coords); } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { const int packetSize = internal::unpacket_traits<PacketReturnType>::size; EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+packetSize-1 < dimensions().TotalSize()); EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize]; for (int i = 0; i < packetSize; ++i) { values[i] = coeff(index+i); } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const { // TODO(rmlarsen): This is just a placeholder. Define interface to make // generators return their cost. return TensorOpCost(0, 0, TensorOpCost::AddCost<Scalar>() + TensorOpCost::MulCost<Scalar>()); } EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void extract_coordinates(Index index, array<Index, NumDims>& coords) const { if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { const Index idx = index / m_strides[i]; index -= idx * m_strides[i]; coords[i] = idx; } coords[0] = index; } else { for (int i = 0; i < NumDims - 1; ++i) { const Index idx = index / m_strides[i]; index -= idx * m_strides[i]; coords[i] = idx; } coords[NumDims-1] = index; } } Dimensions m_dimensions; array<Index, NumDims> m_strides; Generator m_generator; }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H
6,339
33.086022
116
h
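TensorGeneratorOp in TensorGenerator.h above evaluates a user-supplied functor at every output coordinate. A hedged sketch of a custom generator; it assumes the TensorBase::generate() entry point that builds this op, and the functor name is illustrative only.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

// The evaluator above calls m_generator(coords) with the multi-dimensional
// coordinates of each element, so the functor takes an array of indices.
struct CheckerboardGenerator {
  float operator()(const Eigen::array<Eigen::Index, 2>& coord) const {
    return static_cast<float>((coord[0] + coord[1]) % 2);
  }
};

int main() {
  Eigen::Tensor<float, 2> t(4, 4);  // only the shape matters here
  Eigen::Tensor<float, 2> board = t.generate(CheckerboardGenerator());
  std::cout << board << "\n";
  return 0;
}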
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Eugene Brevdo <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H #define EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H namespace Eigen { /** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) to the given tensors. * * This function computes the regularized incomplete beta function (integral). * */ template <typename ADerived, typename BDerived, typename XDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorCwiseTernaryOp<internal::scalar_betainc_op<typename XDerived::Scalar>, const ADerived, const BDerived, const XDerived> betainc(const ADerived& a, const BDerived& b, const XDerived& x) { return TensorCwiseTernaryOp< internal::scalar_betainc_op<typename XDerived::Scalar>, const ADerived, const BDerived, const XDerived>( a, b, x, internal::scalar_betainc_op<typename XDerived::Scalar>()); } } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H
1,316
37.735294
105
h
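The betainc() overload declared in TensorGlobalFunctions.h above builds a coefficient-wise ternary expression. A short sketch, assuming the scalar special-functions support (scalar_betainc_op) is available in the build; the argument order is (a, b, x) as in the declaration.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 1> a(3), b(3), x(3);
  a.setValues({0.5f, 2.0f, 5.0f});
  b.setValues({0.5f, 2.0f, 1.0f});
  x.setValues({0.25f, 0.5f, 0.75f});

  // Regularized incomplete beta function I_x(a, b), element-wise.
  Eigen::Tensor<float, 1> r = Eigen::betainc(a, b, x);
  std::cout << r << "\n";
  return 0;
}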
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_IO_H #define EIGEN_CXX11_TENSOR_TENSOR_IO_H namespace Eigen { namespace internal { // Print the tensor as a 2d matrix template <typename Tensor, int Rank> struct TensorPrinter { static void run (std::ostream& os, const Tensor& tensor) { typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar; typedef typename Tensor::Index Index; const Index total_size = internal::array_prod(tensor.dimensions()); if (total_size > 0) { const Index first_dim = Eigen::internal::array_get<0>(tensor.dimensions()); static const int layout = Tensor::Layout; Map<const Array<Scalar, Dynamic, Dynamic, layout> > matrix(const_cast<Scalar*>(tensor.data()), first_dim, total_size/first_dim); os << matrix; } } }; // Print the tensor as a vector template <typename Tensor> struct TensorPrinter<Tensor, 1> { static void run (std::ostream& os, const Tensor& tensor) { typedef typename internal::remove_const<typename Tensor::Scalar>::type Scalar; typedef typename Tensor::Index Index; const Index total_size = internal::array_prod(tensor.dimensions()); if (total_size > 0) { Map<const Array<Scalar, Dynamic, 1> > array(const_cast<Scalar*>(tensor.data()), total_size); os << array; } } }; // Print the tensor as a scalar template <typename Tensor> struct TensorPrinter<Tensor, 0> { static void run (std::ostream& os, const Tensor& tensor) { os << tensor.coeff(0); } }; } template <typename T> std::ostream& operator << (std::ostream& os, const TensorBase<T, ReadOnlyAccessors>& expr) { typedef TensorEvaluator<const TensorForcedEvalOp<const T>, DefaultDevice> Evaluator; typedef typename Evaluator::Dimensions Dimensions; // Evaluate the expression if needed TensorForcedEvalOp<const T> eval = expr.eval(); Evaluator tensor(eval, DefaultDevice()); tensor.evalSubExprsIfNeeded(NULL); // Print the result static const int rank = internal::array_size<Dimensions>::value; internal::TensorPrinter<Evaluator, rank>::run(os, tensor); // Cleanup. tensor.cleanup(); return os; } } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H
2,560
31.0125
134
h
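The TensorPrinter specializations in TensorIO.h above are what back operator<< for tensor expressions: rank 1 prints as a vector, rank 0 as a scalar, and anything higher is flattened to a first_dim x (total_size / first_dim) matrix. A short sketch:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<int, 3> t(2, 2, 2);
  t.setValues({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}});

  // Rank 3, so this prints as a 2x4 matrix, per TensorPrinter above.
  std::cout << t << "\n\n";

  // Expressions are forced-evaluated before printing.
  std::cout << t.constant(10) - t << "\n";
  return 0;
}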
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Ke Yang <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H #define EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H namespace Eigen { /** \class TensorInflation * \ingroup CXX11_Tensor_Module * * \brief Tensor inflation class. * * */ namespace internal { template<typename Strides, typename XprType> struct traits<TensorInflationOp<Strides, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; }; template<typename Strides, typename XprType> struct eval<TensorInflationOp<Strides, XprType>, Eigen::Dense> { typedef const TensorInflationOp<Strides, XprType>& type; }; template<typename Strides, typename XprType> struct nested<TensorInflationOp<Strides, XprType>, 1, typename eval<TensorInflationOp<Strides, XprType> >::type> { typedef TensorInflationOp<Strides, XprType> type; }; } // end namespace internal template<typename Strides, typename XprType> class TensorInflationOp : public TensorBase<TensorInflationOp<Strides, XprType>, ReadOnlyAccessors> { public: typedef typename Eigen::internal::traits<TensorInflationOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorInflationOp>::type Nested; typedef typename Eigen::internal::traits<TensorInflationOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorInflationOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorInflationOp(const XprType& expr, const Strides& strides) : m_xpr(expr), m_strides(strides) {} EIGEN_DEVICE_FUNC const Strides& strides() const { return m_strides; } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } protected: typename XprType::Nested m_xpr; const Strides m_strides; }; // Eval as rvalue template<typename Strides, typename ArgType, typename Device> struct TensorEvaluator<const TensorInflationOp<Strides, ArgType>, Device> { typedef TensorInflationOp<Strides, ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/ false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, BlockAccess = false, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, // to be implemented RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device), 
m_strides(op.strides()) { m_dimensions = m_impl.dimensions(); // Expand each dimension to the inflated dimension. for (int i = 0; i < NumDims; ++i) { m_dimensions[i] = (m_dimensions[i] - 1) * op.strides()[i] + 1; } // Remember the strides for fast division. for (int i = 0; i < NumDims; ++i) { m_fastStrides[i] = internal::TensorIntDivisor<Index>(m_strides[i]); } const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { m_outputStrides[0] = 1; m_inputStrides[0] = 1; for (int i = 1; i < NumDims; ++i) { m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; } } else { // RowMajor m_outputStrides[NumDims-1] = 1; m_inputStrides[NumDims-1] = 1; for (int i = NumDims - 2; i >= 0; --i) { m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; } } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { m_impl.evalSubExprsIfNeeded(NULL); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } // Computes the input index given the output index. Returns true if the output // index doesn't fall into a hole. EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool getInputIndex(Index index, Index* inputIndex) const { eigen_assert(index < dimensions().TotalSize()); *inputIndex = 0; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { const Index idx = index / m_outputStrides[i]; if (idx != idx / m_fastStrides[i] * m_strides[i]) { return false; } *inputIndex += idx / m_strides[i] * m_inputStrides[i]; index -= idx * m_outputStrides[i]; } if (index != index / m_fastStrides[0] * m_strides[0]) { return false; } *inputIndex += index / m_strides[0]; return true; } else { for (int i = 0; i < NumDims - 1; ++i) { const Index idx = index / m_outputStrides[i]; if (idx != idx / m_fastStrides[i] * m_strides[i]) { return false; } *inputIndex += idx / m_strides[i] * m_inputStrides[i]; index -= idx * m_outputStrides[i]; } if (index != index / m_fastStrides[NumDims-1] * m_strides[NumDims-1]) { return false; } *inputIndex += index / m_strides[NumDims - 1]; } return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { Index inputIndex = 0; if (getInputIndex(index, &inputIndex)) { return m_impl.coeff(inputIndex); } else { return Scalar(0); } } // TODO(yangke): optimize this function so that we can detect and produce // all-zero packets template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; for (int i = 0; i < PacketSize; ++i) { values[i] = coeff(index+i); } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { const double compute_cost = NumDims * (3 * TensorOpCost::DivCost<Index>() + 3 * TensorOpCost::MulCost<Index>() + 2 * TensorOpCost::AddCost<Index>()); const double input_size = m_impl.dimensions().TotalSize(); const double output_size = m_dimensions.TotalSize(); if (output_size == 
0) return TensorOpCost(); return m_impl.costPerCoeff(vectorized) + TensorOpCost(sizeof(CoeffReturnType) * input_size / output_size, 0, compute_cost, vectorized, PacketSize); } EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } protected: Dimensions m_dimensions; array<Index, NumDims> m_outputStrides; array<Index, NumDims> m_inputStrides; TensorEvaluator<ArgType, Device> m_impl; const Strides m_strides; array<internal::TensorIntDivisor<Index>, NumDims> m_fastStrides; }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H
8,430
35.656522
112
h
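Inflation, as implemented in TensorInflation.h above, spreads the input out so that a dimension of size n becomes (n - 1) * stride + 1, with zeros in the holes. A hedged sketch, assuming the TensorBase::inflate() entry point that creates TensorInflationOp.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 1> t(3);
  t.setValues({1.f, 2.f, 3.f});

  // Stride 2 along the only dimension: output size (3 - 1) * 2 + 1 = 5,
  // holes filled with zeros, i.e. 1 0 2 0 3.
  Eigen::array<Eigen::Index, 1> strides{{2}};
  Eigen::Tensor<float, 1> inflated = t.inflate(strides);
  std::cout << inflated << "\n";
  return 0;
}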
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H #define EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H #if EIGEN_HAS_VARIADIC_TEMPLATES #include <initializer_list> namespace Eigen { /** \class TensorInitializer * \ingroup CXX11_Tensor_Module * * \brief Helper template to initialize Tensors from std::initializer_lists. */ namespace internal { template <typename Derived, int N> struct Initializer { typedef std::initializer_list< typename Initializer<Derived, N - 1>::InitList> InitList; static void run(TensorEvaluator<Derived, DefaultDevice>& tensor, Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>* indices, const InitList& vals) { int i = 0; for (auto v : vals) { (*indices)[traits<Derived>::NumDimensions - N] = i++; Initializer<Derived, N - 1>::run(tensor, indices, v); } } }; template <typename Derived> struct Initializer<Derived, 1> { typedef std::initializer_list<typename traits<Derived>::Scalar> InitList; static void run(TensorEvaluator<Derived, DefaultDevice>& tensor, Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>* indices, const InitList& vals) { int i = 0; // There is likely a faster way to do that than iterating. for (auto v : vals) { (*indices)[traits<Derived>::NumDimensions - 1] = i++; tensor.coeffRef(*indices) = v; } } }; template <typename Derived> struct Initializer<Derived, 0> { typedef typename traits<Derived>::Scalar InitList; static void run(TensorEvaluator<Derived, DefaultDevice>& tensor, Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions>*, const InitList& v) { tensor.coeffRef(0) = v; } }; template <typename Derived, int N> void initialize_tensor(TensorEvaluator<Derived, DefaultDevice>& tensor, const typename Initializer<Derived, traits<Derived>::NumDimensions>::InitList& vals) { Eigen::array<typename traits<Derived>::Index, traits<Derived>::NumDimensions> indices; Initializer<Derived, traits<Derived>::NumDimensions>::run(tensor, &indices, vals); } } // namespace internal } // namespace Eigen #endif // EIGEN_HAS_VARIADIC_TEMPLATES #endif // EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H
2,716
31.73494
109
h
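The Initializer helper in TensorInitializer.h above is what walks nested std::initializer_lists when variadic templates are available. From user code it is normally reached through setValues(), where the nesting depth must match the tensor's rank:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<int, 2> t(2, 3);
  t.setValues({{1, 2, 3},
               {4, 5, 6}});
  std::cout << t << "\n";
  return 0;
}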
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H #define EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H namespace Eigen { /** \class TensorLayoutSwap * \ingroup CXX11_Tensor_Module * * \brief Swap the layout from col-major to row-major, or row-major * to col-major, and invert the order of the dimensions. * * Beware: the dimensions are reversed by this operation. If you want to * preserve the ordering of the dimensions, you need to combine this * operation with a shuffle. * * \example: * Tensor<float, 2, ColMajor> input(2, 4); * Tensor<float, 2, RowMajor> output = input.swap_layout(); * eigen_assert(output.dimension(0) == 4); * eigen_assert(output.dimension(1) == 2); * * array<int, 2> shuffle(1, 0); * output = input.swap_layout().shuffle(shuffle); * eigen_assert(output.dimension(0) == 2); * eigen_assert(output.dimension(1) == 4); * */ namespace internal { template<typename XprType> struct traits<TensorLayoutSwapOp<XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = traits<XprType>::NumDimensions; static const int Layout = (traits<XprType>::Layout == ColMajor) ? RowMajor : ColMajor; }; template<typename XprType> struct eval<TensorLayoutSwapOp<XprType>, Eigen::Dense> { typedef const TensorLayoutSwapOp<XprType>& type; }; template<typename XprType> struct nested<TensorLayoutSwapOp<XprType>, 1, typename eval<TensorLayoutSwapOp<XprType> >::type> { typedef TensorLayoutSwapOp<XprType> type; }; } // end namespace internal template<typename XprType> class TensorLayoutSwapOp : public TensorBase<TensorLayoutSwapOp<XprType>, WriteAccessors> { public: typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType; typedef typename Eigen::internal::nested<TensorLayoutSwapOp>::type Nested; typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorLayoutSwapOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorLayoutSwapOp(const XprType& expr) : m_xpr(expr) {} EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorLayoutSwapOp& operator = (const TensorLayoutSwapOp& other) { typedef TensorAssignOp<TensorLayoutSwapOp, const TensorLayoutSwapOp> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorLayoutSwapOp& operator = (const OtherDerived& other) { typedef TensorAssignOp<TensorLayoutSwapOp, const OtherDerived> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } protected: typename 
XprType::Nested m_xpr; }; // Eval as rvalue template<typename ArgType, typename Device> struct TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device> { typedef TensorLayoutSwapOp<ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; typedef DSizes<Index, NumDims> Dimensions; enum { IsAligned = TensorEvaluator<ArgType, Device>::IsAligned, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor, CoordAccess = false, // to be implemented RawAccess = TensorEvaluator<ArgType, Device>::RawAccess }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device) { for(int i = 0; i < NumDims; ++i) { m_dimensions[i] = m_impl.dimensions()[NumDims-1-i]; } } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) { return m_impl.evalSubExprsIfNeeded(data); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_impl.coeff(index); } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { return m_impl.template packet<LoadMode>(index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { return m_impl.costPerCoeff(vectorized); } EIGEN_DEVICE_FUNC Scalar* data() const { return m_impl.data(); } const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; } protected: TensorEvaluator<ArgType, Device> m_impl; Dimensions m_dimensions; }; // Eval as lvalue template<typename ArgType, typename Device> struct TensorEvaluator<TensorLayoutSwapOp<ArgType>, Device> : public TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device> { typedef TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device> Base; typedef TensorLayoutSwapOp<ArgType> XprType; enum { IsAligned = TensorEvaluator<ArgType, Device>::IsAligned, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor, CoordAccess = false // to be implemented }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) { } typedef typename XprType::Index Index; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) { return this->m_impl.coeffRef(index); } template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) { this->m_impl.template writePacket<StoreMode>(index, x); } }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
7,354
34.02381
126
h
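The doxygen comment in TensorLayoutSwap.h above already sketches the intended idiom; here is a compilable version of that same example, combining swap_layout() with a shuffle so the logical dimension order is preserved.

#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2, Eigen::ColMajor> input(2, 4);
  input.setRandom();

  // swap_layout() flips the storage order and reverses the dimensions...
  Eigen::Tensor<float, 2, Eigen::RowMajor> swapped = input.swap_layout();
  assert(swapped.dimension(0) == 4 && swapped.dimension(1) == 2);

  // ...so combine it with a shuffle to keep the original dimension order.
  Eigen::array<int, 2> shuffle{{1, 0}};
  Eigen::Tensor<float, 2, Eigen::RowMajor> output =
      input.swap_layout().shuffle(shuffle);
  assert(output.dimension(0) == 2 && output.dimension(1) == 4);
  std::cout << output << "\n";
  return 0;
}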
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H
#define EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H

/** use this macro in sfinae selection in templated functions
 *
 *   template<typename T,
 *            typename std::enable_if< isBanana<T>::value , int >::type = 0
 *   >
 *   void foo(){}
 *
 *   becomes =>
 *
 *   template<typename T,
 *            EIGEN_SFINAE_ENABLE_IF( isBanana<T>::value )
 *   >
 *   void foo(){}
 */

// SFINAE requires variadic templates
#ifndef __CUDACC__
#if EIGEN_HAS_VARIADIC_TEMPLATES
  // SFINAE doesn't work for gcc <= 4.7
  #ifdef EIGEN_COMP_GNUC
    #if EIGEN_GNUC_AT_LEAST(4,8)
      #define EIGEN_HAS_SFINAE
    #endif
  #else
    #define EIGEN_HAS_SFINAE
  #endif
#endif
#endif

#define EIGEN_SFINAE_ENABLE_IF( __condition__ ) \
    typename internal::enable_if< ( __condition__ ) , int >::type = 0

#if EIGEN_HAS_CONSTEXPR
#define EIGEN_CONSTEXPR constexpr
#else
#define EIGEN_CONSTEXPR
#endif

#endif
1,310
22.836364
75
h
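EIGEN_SFINAE_ENABLE_IF in TensorMacros.h above expands to a defaulted non-type template parameter constrained by internal::enable_if, so it is only usable inside Eigen's own headers. The snippet below shows the same pattern in plain C++11, purely to illustrate what the macro documents; the names are illustrative and not part of Eigen.

#include <type_traits>
#include <iostream>

// Hand-written equivalent of the documented transformation: a defaulted
// int template parameter whose type only exists when the condition holds.
template <typename T,
          typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
const char* describe(T) { return "integral"; }

template <typename T,
          typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
const char* describe(T) { return "floating point"; }

int main() {
  std::cout << describe(42) << "\n";    // integral
  std::cout << describe(3.14) << "\n";  // floating point
  return 0;
}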
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_META_H #define EIGEN_CXX11_TENSOR_TENSOR_META_H namespace Eigen { template<bool cond> struct Cond {}; template<typename T1, typename T2> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const T1& choose(Cond<true>, const T1& first, const T2&) { return first; } template<typename T1, typename T2> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const T2& choose(Cond<false>, const T1&, const T2& second) { return second; } template <typename T, typename X, typename Y> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T divup(const X x, const Y y) { return static_cast<T>((x + y - 1) / y); } template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T divup(const T x, const T y) { return static_cast<T>((x + y - 1) / y); } template <size_t n> struct max_n_1 { static const size_t size = n; }; template <> struct max_n_1<0> { static const size_t size = 1; }; // Default packet types template <typename Scalar, typename Device> struct PacketType : internal::packet_traits<Scalar> { typedef typename internal::packet_traits<Scalar>::type type; }; // For CUDA packet types when using a GpuDevice #if defined(EIGEN_USE_GPU) && defined(__CUDACC__) && defined(EIGEN_HAS_CUDA_FP16) template <> struct PacketType<half, GpuDevice> { typedef half2 type; static const int size = 2; enum { HasAdd = 1, HasSub = 1, HasMul = 1, HasNegate = 1, HasAbs = 1, HasArg = 0, HasAbs2 = 0, HasMin = 1, HasMax = 1, HasConj = 0, HasSetLinear = 0, HasBlend = 0, HasDiv = 1, HasSqrt = 1, HasRsqrt = 1, HasExp = 1, HasLog = 1, HasLog1p = 0, HasLog10 = 0, HasPow = 1, }; }; #endif #if defined(EIGEN_USE_SYCL) template <typename T> struct PacketType<T, SyclDevice> { typedef T type; static const int size = 1; enum { HasAdd = 0, HasSub = 0, HasMul = 0, HasNegate = 0, HasAbs = 0, HasArg = 0, HasAbs2 = 0, HasMin = 0, HasMax = 0, HasConj = 0, HasSetLinear = 0, HasBlend = 0 }; }; #endif // Tuple mimics std::pair but works on e.g. nvcc. 
template <typename U, typename V> struct Tuple { public: U first; V second; typedef U first_type; typedef V second_type; EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tuple() : first(), second() {} EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tuple(const U& f, const V& s) : first(f), second(s) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tuple& operator= (const Tuple& rhs) { if (&rhs == this) return *this; first = rhs.first; second = rhs.second; return *this; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void swap(Tuple& rhs) { using numext::swap; swap(first, rhs.first); swap(second, rhs.second); } }; template <typename U, typename V> EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator==(const Tuple<U, V>& x, const Tuple<U, V>& y) { return (x.first == y.first && x.second == y.second); } template <typename U, typename V> EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator!=(const Tuple<U, V>& x, const Tuple<U, V>& y) { return !(x == y); } // Can't use std::pairs on cuda devices template <typename Idx> struct IndexPair { EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair() : first(0), second(0) {} EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair(Idx f, Idx s) : first(f), second(s) {} EIGEN_DEVICE_FUNC void set(IndexPair<Idx> val) { first = val.first; second = val.second; } Idx first; Idx second; }; #ifdef EIGEN_HAS_SFINAE namespace internal { template<typename IndexType, Index... Is> EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE array<Index, sizeof...(Is)> customIndices2Array(IndexType& idx, numeric_list<Index, Is...>) { return { idx[Is]... }; } template<typename IndexType> EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE array<Index, 0> customIndices2Array(IndexType&, numeric_list<Index>) { return array<Index, 0>(); } /** Make an array (for index/dimensions) out of a custom index */ template<typename Index, std::size_t NumIndices, typename IndexType> EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE array<Index, NumIndices> customIndices2Array(IndexType& idx) { return customIndices2Array(idx, typename gen_numeric_list<Index, NumIndices>::type{}); } template <typename B, typename D> struct is_base_of { typedef char (&yes)[1]; typedef char (&no)[2]; template <typename BB, typename DD> struct Host { operator BB*() const; operator DD*(); }; template<typename T> static yes check(D*, T); static no check(B*, int); static const bool value = sizeof(check(Host<B,D>(), int())) == sizeof(yes); }; } #endif } // namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_META_H
5,309
23.246575
104
h
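Two of the small utilities in TensorMeta.h above show up constantly in the rest of the module: divup() for ceiling division of index ranges, and IndexPair, the std::pair replacement used to name contraction axes on devices where std::pair is not usable. A short sketch:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // divup(x, y) is integer ceil(x / y): (10 + 4 - 1) / 4 = 3.
  std::cout << Eigen::divup(10, 4) << "\n";

  // IndexPair names the axes to contract: axis 1 of A with axis 0 of B,
  // i.e. an ordinary matrix product.
  Eigen::Tensor<float, 2> A(2, 3), B(3, 4);
  A.setRandom();
  B.setRandom();
  Eigen::array<Eigen::IndexPair<int>, 1> dims = {{Eigen::IndexPair<int>(1, 0)}};
  Eigen::Tensor<float, 2> C = A.contract(B, dims);
  std::cout << C.dimension(0) << "x" << C.dimension(1) << "\n";  // 2x4
  return 0;
}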
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_PATCH_H #define EIGEN_CXX11_TENSOR_TENSOR_PATCH_H namespace Eigen { /** \class TensorPatch * \ingroup CXX11_Tensor_Module * * \brief Tensor patch class. * * */ namespace internal { template<typename PatchDim, typename XprType> struct traits<TensorPatchOp<PatchDim, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions + 1; static const int Layout = XprTraits::Layout; }; template<typename PatchDim, typename XprType> struct eval<TensorPatchOp<PatchDim, XprType>, Eigen::Dense> { typedef const TensorPatchOp<PatchDim, XprType>& type; }; template<typename PatchDim, typename XprType> struct nested<TensorPatchOp<PatchDim, XprType>, 1, typename eval<TensorPatchOp<PatchDim, XprType> >::type> { typedef TensorPatchOp<PatchDim, XprType> type; }; } // end namespace internal template<typename PatchDim, typename XprType> class TensorPatchOp : public TensorBase<TensorPatchOp<PatchDim, XprType>, ReadOnlyAccessors> { public: typedef typename Eigen::internal::traits<TensorPatchOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorPatchOp>::type Nested; typedef typename Eigen::internal::traits<TensorPatchOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorPatchOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPatchOp(const XprType& expr, const PatchDim& patch_dims) : m_xpr(expr), m_patch_dims(patch_dims) {} EIGEN_DEVICE_FUNC const PatchDim& patch_dims() const { return m_patch_dims; } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } protected: typename XprType::Nested m_xpr; const PatchDim m_patch_dims; }; // Eval as rvalue template<typename PatchDim, typename ArgType, typename Device> struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device> { typedef TensorPatchOp<PatchDim, ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value + 1; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device) { Index num_patches = 1; const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); const 
PatchDim& patch_dims = op.patch_dims(); if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = 0; i < NumDims-1; ++i) { m_dimensions[i] = patch_dims[i]; num_patches *= (input_dims[i] - patch_dims[i] + 1); } m_dimensions[NumDims-1] = num_patches; m_inputStrides[0] = 1; m_patchStrides[0] = 1; for (int i = 1; i < NumDims-1; ++i) { m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; m_patchStrides[i] = m_patchStrides[i-1] * (input_dims[i-1] - patch_dims[i-1] + 1); } m_outputStrides[0] = 1; for (int i = 1; i < NumDims; ++i) { m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; } } else { for (int i = 0; i < NumDims-1; ++i) { m_dimensions[i+1] = patch_dims[i]; num_patches *= (input_dims[i] - patch_dims[i] + 1); } m_dimensions[0] = num_patches; m_inputStrides[NumDims-2] = 1; m_patchStrides[NumDims-2] = 1; for (int i = NumDims-3; i >= 0; --i) { m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; m_patchStrides[i] = m_patchStrides[i+1] * (input_dims[i+1] - patch_dims[i+1] + 1); } m_outputStrides[NumDims-1] = 1; for (int i = NumDims-2; i >= 0; --i) { m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; } } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { m_impl.evalSubExprsIfNeeded(NULL); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? NumDims - 1 : 0; // Find the location of the first element of the patch. Index patchIndex = index / m_outputStrides[output_stride_index]; // Find the offset of the element wrt the location of the first element. Index patchOffset = index - patchIndex * m_outputStrides[output_stride_index]; Index inputIndex = 0; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 2; i > 0; --i) { const Index patchIdx = patchIndex / m_patchStrides[i]; patchIndex -= patchIdx * m_patchStrides[i]; const Index offsetIdx = patchOffset / m_outputStrides[i]; patchOffset -= offsetIdx * m_outputStrides[i]; inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i]; } } else { for (int i = 0; i < NumDims - 2; ++i) { const Index patchIdx = patchIndex / m_patchStrides[i]; patchIndex -= patchIdx * m_patchStrides[i]; const Index offsetIdx = patchOffset / m_outputStrides[i+1]; patchOffset -= offsetIdx * m_outputStrides[i+1]; inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i]; } } inputIndex += (patchIndex + patchOffset); return m_impl.coeff(inputIndex); } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); Index output_stride_index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 
NumDims - 1 : 0; Index indices[2] = {index, index + PacketSize - 1}; Index patchIndices[2] = {indices[0] / m_outputStrides[output_stride_index], indices[1] / m_outputStrides[output_stride_index]}; Index patchOffsets[2] = {indices[0] - patchIndices[0] * m_outputStrides[output_stride_index], indices[1] - patchIndices[1] * m_outputStrides[output_stride_index]}; Index inputIndices[2] = {0, 0}; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 2; i > 0; --i) { const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i], patchIndices[1] / m_patchStrides[i]}; patchIndices[0] -= patchIdx[0] * m_patchStrides[i]; patchIndices[1] -= patchIdx[1] * m_patchStrides[i]; const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i], patchOffsets[1] / m_outputStrides[i]}; patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i]; patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i]; inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i]; inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i]; } } else { for (int i = 0; i < NumDims - 2; ++i) { const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i], patchIndices[1] / m_patchStrides[i]}; patchIndices[0] -= patchIdx[0] * m_patchStrides[i]; patchIndices[1] -= patchIdx[1] * m_patchStrides[i]; const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i+1], patchOffsets[1] / m_outputStrides[i+1]}; patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i+1]; patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i+1]; inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i]; inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i]; } } inputIndices[0] += (patchIndices[0] + patchOffsets[0]); inputIndices[1] += (patchIndices[1] + patchOffsets[1]); if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]); return rslt; } else { EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize]; values[0] = m_impl.coeff(inputIndices[0]); values[PacketSize-1] = m_impl.coeff(inputIndices[1]); for (int i = 1; i < PacketSize-1; ++i) { values[i] = coeff(index+i); } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { const double compute_cost = NumDims * (TensorOpCost::DivCost<Index>() + TensorOpCost::MulCost<Index>() + 2 * TensorOpCost::AddCost<Index>()); return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); } EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } protected: Dimensions m_dimensions; array<Index, NumDims> m_outputStrides; array<Index, NumDims-1> m_inputStrides; array<Index, NumDims-1> m_patchStrides; TensorEvaluator<ArgType, Device> m_impl; }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H
10,687
38.585185
116
h
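TensorPatchOp in TensorPatch.h above extracts every patch of a given size and appends one extra dimension that indexes the patches. A hedged sketch; the TensorBase::extract_patches() entry point used here is assumed from the op's usual wiring rather than shown in this excerpt.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(3, 3);
  t.setValues({{1.f, 2.f, 3.f},
               {4.f, 5.f, 6.f},
               {7.f, 8.f, 9.f}});

  // 2x2 patches of a 3x3 input: (3 - 2 + 1) * (3 - 2 + 1) = 4 patches,
  // so the column-major result has shape (2, 2, 4).
  Eigen::array<Eigen::Index, 2> patch_dims{{2, 2}};
  Eigen::Tensor<float, 3> patches = t.extract_patches(patch_dims);
  std::cout << patches.dimension(0) << "x" << patches.dimension(1) << "x"
            << patches.dimension(2) << "\n";  // 2x2x4
  return 0;
}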
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Navdeep Jaitly <[email protected]> // Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H #define EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H namespace Eigen { /** \class TensorReverse * \ingroup CXX11_Tensor_Module * * \brief Tensor reverse elements class. * */ namespace internal { template<typename ReverseDimensions, typename XprType> struct traits<TensorReverseOp<ReverseDimensions, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; }; template<typename ReverseDimensions, typename XprType> struct eval<TensorReverseOp<ReverseDimensions, XprType>, Eigen::Dense> { typedef const TensorReverseOp<ReverseDimensions, XprType>& type; }; template<typename ReverseDimensions, typename XprType> struct nested<TensorReverseOp<ReverseDimensions, XprType>, 1, typename eval<TensorReverseOp<ReverseDimensions, XprType> >::type> { typedef TensorReverseOp<ReverseDimensions, XprType> type; }; } // end namespace internal template<typename ReverseDimensions, typename XprType> class TensorReverseOp : public TensorBase<TensorReverseOp<ReverseDimensions, XprType>, WriteAccessors> { public: typedef typename Eigen::internal::traits<TensorReverseOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorReverseOp>::type Nested; typedef typename Eigen::internal::traits<TensorReverseOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorReverseOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp( const XprType& expr, const ReverseDimensions& reverse_dims) : m_xpr(expr), m_reverse_dims(reverse_dims) { } EIGEN_DEVICE_FUNC const ReverseDimensions& reverse() const { return m_reverse_dims; } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp& operator = (const TensorReverseOp& other) { typedef TensorAssignOp<TensorReverseOp, const TensorReverseOp> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp& operator = (const OtherDerived& other) { typedef TensorAssignOp<TensorReverseOp, const OtherDerived> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } protected: typename XprType::Nested m_xpr; const ReverseDimensions m_reverse_dims; }; // Eval as rvalue template<typename ReverseDimensions, typename ArgType, typename Device> struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device> { typedef TensorReverseOp<ReverseDimensions, ArgType> XprType; typedef typename XprType::Index 
Index; static const int NumDims = internal::array_size<ReverseDimensions>::value; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, // to be implemented RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device), m_reverse(op.reverse()) { // Reversing a scalar isn't supported yet. It would be a no-op anyway. EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); // Compute strides m_dimensions = m_impl.dimensions(); if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { m_strides[0] = 1; for (int i = 1; i < NumDims; ++i) { m_strides[i] = m_strides[i-1] * m_dimensions[i-1]; } } else { m_strides[NumDims-1] = 1; for (int i = NumDims - 2; i >= 0; --i) { m_strides[i] = m_strides[i+1] * m_dimensions[i+1]; } } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) { m_impl.evalSubExprsIfNeeded(NULL); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex( Index index) const { eigen_assert(index < dimensions().TotalSize()); Index inputIndex = 0; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { Index idx = index / m_strides[i]; index -= idx * m_strides[i]; if (m_reverse[i]) { idx = m_dimensions[i] - idx - 1; } inputIndex += idx * m_strides[i] ; } if (m_reverse[0]) { inputIndex += (m_dimensions[0] - index - 1); } else { inputIndex += index; } } else { for (int i = 0; i < NumDims - 1; ++i) { Index idx = index / m_strides[i]; index -= idx * m_strides[i]; if (m_reverse[i]) { idx = m_dimensions[i] - idx - 1; } inputIndex += idx * m_strides[i] ; } if (m_reverse[NumDims-1]) { inputIndex += (m_dimensions[NumDims-1] - index - 1); } else { inputIndex += index; } } return inputIndex; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff( Index index) const { return m_impl.coeff(reverseIndex(index)); } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); // TODO(ndjaitly): write a better packing routine that uses // local structure. 
EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; for (int i = 0; i < PacketSize; ++i) { values[i] = coeff(index+i); } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>()); for (int i = 0; i < NumDims; ++i) { if (m_reverse[i]) { compute_cost += 2 * TensorOpCost::AddCost<Index>(); } } return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize); } EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } protected: Dimensions m_dimensions; array<Index, NumDims> m_strides; TensorEvaluator<ArgType, Device> m_impl; ReverseDimensions m_reverse; }; // Eval as lvalue template <typename ReverseDimensions, typename ArgType, typename Device> struct TensorEvaluator<TensorReverseOp<ReverseDimensions, ArgType>, Device> : public TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device> { typedef TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device> Base; typedef TensorReverseOp<ReverseDimensions, ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<ReverseDimensions>::value; typedef DSizes<Index, NumDims> Dimensions; enum { IsAligned = false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, // to be implemented RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) {} typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return this->m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return this->m_impl.coeffRef(this->reverseIndex(index)); } template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); // This code is pilfered from TensorMorphing.h EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize]; internal::pstore<CoeffReturnType, PacketReturnType>(values, x); for (int i = 0; i < PacketSize; ++i) { this->coeffRef(index+i) = values[i]; } } }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
10,527
35.429066
90
h
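The TensorReverseOp above is what TensorBase::reverse() returns. A minimal host-side sketch of its use, assuming the vendored headers are on the include path and C++11 compilation; the values and dimension choices are illustrative only:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setValues({{1.f, 2.f, 3.f},
               {4.f, 5.f, 6.f}});

  // Reverse only the second (column) dimension; the first is left as-is.
  Eigen::array<bool, 2> rev{{false, true}};
  Eigen::Tensor<float, 2> r = t.reverse(rev);

  // Each row now has its columns in reverse order: r(0,0) == 3, r(0,2) == 1.
  std::cout << r << std::endl;
  return 0;
}

Assigning the expression to a Tensor is what triggers the evaluator defined above; until then reverse() only builds the expression node.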
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Igor Babuschkin <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H #define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H namespace Eigen { namespace internal { template <typename Op, typename XprType> struct traits<TensorScanOp<Op, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; }; template<typename Op, typename XprType> struct eval<TensorScanOp<Op, XprType>, Eigen::Dense> { typedef const TensorScanOp<Op, XprType>& type; }; template<typename Op, typename XprType> struct nested<TensorScanOp<Op, XprType>, 1, typename eval<TensorScanOp<Op, XprType> >::type> { typedef TensorScanOp<Op, XprType> type; }; } // end namespace internal /** \class TensorScan * \ingroup CXX11_Tensor_Module * * \brief Tensor scan class. */ template <typename Op, typename XprType> class TensorScanOp : public TensorBase<TensorScanOp<Op, XprType>, ReadOnlyAccessors> { public: typedef typename Eigen::internal::traits<TensorScanOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorScanOp>::type Nested; typedef typename Eigen::internal::traits<TensorScanOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorScanOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorScanOp( const XprType& expr, const Index& axis, bool exclusive = false, const Op& op = Op()) : m_expr(expr), m_axis(axis), m_accumulator(op), m_exclusive(exclusive) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index axis() const { return m_axis; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const XprType& expression() const { return m_expr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op accumulator() const { return m_accumulator; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const { return m_exclusive; } protected: typename XprType::Nested m_expr; const Index m_axis; const Op m_accumulator; const bool m_exclusive; }; template <typename Self, typename Reducer, typename Device> struct ScanLauncher; // Eval as rvalue template <typename Op, typename ArgType, typename Device> struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> { typedef TensorScanOp<Op, ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; typedef DSizes<Index, NumDims> Dimensions; typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; typedef TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> Self; enum { IsAligned = false, PacketAccess = (internal::unpacket_traits<PacketReturnType>::size > 1), BlockAccess = false, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, RawAccess = true }; EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device), m_device(device), m_exclusive(op.exclusive()), m_accumulator(op.accumulator()), m_size(m_impl.dimensions()[op.axis()]), m_stride(1), m_output(NULL) { // Accumulating a scalar isn't supported. EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); eigen_assert(op.axis() >= 0 && op.axis() < NumDims); // Compute stride of scan axis const Dimensions& dims = m_impl.dimensions(); if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = 0; i < op.axis(); ++i) { m_stride = m_stride * dims[i]; } } else { for (int i = NumDims - 1; i > op.axis(); --i) { m_stride = m_stride * dims[i]; } } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride() const { return m_stride; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& size() const { return m_size; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const { return m_accumulator; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const { return m_exclusive; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator<ArgType, Device>& inner() const { return m_impl; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const { return m_device; } EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { m_impl.evalSubExprsIfNeeded(NULL); ScanLauncher<Self, Op, Device> launcher; if (data) { launcher(*this, data); return false; } const Index total_size = internal::array_prod(dimensions()); m_output = static_cast<CoeffReturnType*>(m_device.allocate(total_size * sizeof(Scalar))); launcher(*this, m_output); return true; } template<int LoadMode> EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { return internal::ploadt<PacketReturnType, LoadMode>(m_output + index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const { return m_output; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_output[index]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const { return TensorOpCost(sizeof(CoeffReturnType), 0, 0); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { if (m_output != NULL) { m_device.deallocate(m_output); m_output = NULL; } m_impl.cleanup(); } protected: TensorEvaluator<ArgType, Device> m_impl; const Device& m_device; const bool m_exclusive; Op m_accumulator; const Index m_size; Index m_stride; CoeffReturnType* m_output; }; // CPU implementation of scan // TODO(ibab) This single-threaded implementation should be parallelized, // at least by running multiple scans at the same time. template <typename Self, typename Reducer, typename Device> struct ScanLauncher { void operator()(Self& self, typename Self::CoeffReturnType *data) { Index total_size = internal::array_prod(self.dimensions()); // We fix the index along the scan axis to 0 and perform a // scan per remaining entry. The iteration is split into two nested // loops to avoid an integer division by keeping track of each idx1 and idx2. 
for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) { for (Index idx2 = 0; idx2 < self.stride(); idx2++) { // Calculate the starting offset for the scan Index offset = idx1 + idx2; // Compute the scan along the axis, starting at the calculated offset typename Self::CoeffReturnType accum = self.accumulator().initialize(); for (Index idx3 = 0; idx3 < self.size(); idx3++) { Index curr = offset + idx3 * self.stride(); if (self.exclusive()) { data[curr] = self.accumulator().finalize(accum); self.accumulator().reduce(self.inner().coeff(curr), &accum); } else { self.accumulator().reduce(self.inner().coeff(curr), &accum); data[curr] = self.accumulator().finalize(accum); } } } } } }; #if defined(EIGEN_USE_GPU) && defined(__CUDACC__) // GPU implementation of scan // TODO(ibab) This placeholder implementation performs multiple scans in // parallel, but it would be better to use a parallel scan algorithm and // optimize memory access. template <typename Self, typename Reducer> __global__ void ScanKernel(Self self, Index total_size, typename Self::CoeffReturnType* data) { // Compute offset as in the CPU version Index val = threadIdx.x + blockIdx.x * blockDim.x; Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride(); if (offset + (self.size() - 1) * self.stride() < total_size) { // Compute the scan along the axis, starting at the calculated offset typename Self::CoeffReturnType accum = self.accumulator().initialize(); for (Index idx = 0; idx < self.size(); idx++) { Index curr = offset + idx * self.stride(); if (self.exclusive()) { data[curr] = self.accumulator().finalize(accum); self.accumulator().reduce(self.inner().coeff(curr), &accum); } else { self.accumulator().reduce(self.inner().coeff(curr), &accum); data[curr] = self.accumulator().finalize(accum); } } } __syncthreads(); } template <typename Self, typename Reducer> struct ScanLauncher<Self, Reducer, GpuDevice> { void operator()(const Self& self, typename Self::CoeffReturnType* data) { Index total_size = internal::array_prod(self.dimensions()); Index num_blocks = (total_size / self.size() + 63) / 64; Index block_size = 64; LAUNCH_CUDA_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0, self.device(), self, total_size, data); } }; #endif // EIGEN_USE_GPU && __CUDACC__ } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
9,941
33.520833
119
h
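TensorScanOp is the node produced by TensorBase::cumsum() and cumprod(). A short sketch of an inclusive and an exclusive prefix sum (include path and values are illustrative assumptions):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<int, 1> t(4);
  t.setValues({1, 2, 3, 4});

  // Inclusive scan along axis 0: 1 3 6 10
  Eigen::Tensor<int, 1> inclusive = t.cumsum(0);

  // Exclusive scan along axis 0: 0 1 3 6 (entry i excludes element i itself)
  Eigen::Tensor<int, 1> exclusive = t.cumsum(0, /*exclusive=*/true);

  std::cout << inclusive << "\n\n" << exclusive << std::endl;
  return 0;
}

On the default (CPU) device this runs the single-threaded ScanLauncher shown above; the CUDA kernel is only used when the expression is evaluated on a GpuDevice.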
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H #define EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H namespace Eigen { /** \class TensorShuffling * \ingroup CXX11_Tensor_Module * * \brief Tensor shuffling class. * * */ namespace internal { template<typename Shuffle, typename XprType> struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; }; template<typename Shuffle, typename XprType> struct eval<TensorShufflingOp<Shuffle, XprType>, Eigen::Dense> { typedef const TensorShufflingOp<Shuffle, XprType>& type; }; template<typename Shuffle, typename XprType> struct nested<TensorShufflingOp<Shuffle, XprType>, 1, typename eval<TensorShufflingOp<Shuffle, XprType> >::type> { typedef TensorShufflingOp<Shuffle, XprType> type; }; } // end namespace internal template<typename Shuffle, typename XprType> class TensorShufflingOp : public TensorBase<TensorShufflingOp<Shuffle, XprType> > { public: typedef typename Eigen::internal::traits<TensorShufflingOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorShufflingOp>::type Nested; typedef typename Eigen::internal::traits<TensorShufflingOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorShufflingOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shuffle) : m_xpr(expr), m_shuffle(shuffle) {} EIGEN_DEVICE_FUNC const Shuffle& shufflePermutation() const { return m_shuffle; } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp& operator = (const TensorShufflingOp& other) { typedef TensorAssignOp<TensorShufflingOp, const TensorShufflingOp> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp& operator = (const OtherDerived& other) { typedef TensorAssignOp<TensorShufflingOp, const OtherDerived> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } protected: typename XprType::Nested m_xpr; const Shuffle m_shuffle; }; // Eval as rvalue template<typename Shuffle, typename ArgType, typename Device> struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> { typedef TensorShufflingOp<Shuffle, ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar 
Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = false, PacketAccess = (internal::packet_traits<Scalar>::size > 1), Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, // to be implemented RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device) { const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); const Shuffle& shuffle = op.shufflePermutation(); for (int i = 0; i < NumDims; ++i) { m_dimensions[i] = input_dims[shuffle[i]]; } array<Index, NumDims> inputStrides; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { inputStrides[0] = 1; m_outputStrides[0] = 1; for (int i = 1; i < NumDims; ++i) { inputStrides[i] = inputStrides[i - 1] * input_dims[i - 1]; m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1]; } } else { inputStrides[NumDims - 1] = 1; m_outputStrides[NumDims - 1] = 1; for (int i = NumDims - 2; i >= 0; --i) { inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1]; m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1]; } } for (int i = 0; i < NumDims; ++i) { m_inputStrides[i] = inputStrides[shuffle[i]]; } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { m_impl.evalSubExprsIfNeeded(NULL); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_impl.coeff(srcCoeff(index)); } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; for (int i = 0; i < PacketSize; ++i) { values[i] = coeff(index+i); } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>()); return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize); } EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const { Index inputIndex = 0; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { const Index idx = index / m_outputStrides[i]; inputIndex += idx * m_inputStrides[i]; index -= idx * m_outputStrides[i]; } return inputIndex + index * m_inputStrides[0]; } else { for (int i = 0; i < NumDims - 1; ++i) { const Index idx = index / m_outputStrides[i]; inputIndex += idx * m_inputStrides[i]; index -= idx * m_outputStrides[i]; } return inputIndex + index * m_inputStrides[NumDims - 1]; } } Dimensions m_dimensions; array<Index, NumDims> m_outputStrides; array<Index, NumDims> m_inputStrides; TensorEvaluator<ArgType, Device> m_impl; }; // Eval as lvalue template<typename Shuffle, typename ArgType, 
typename Device> struct TensorEvaluator<TensorShufflingOp<Shuffle, ArgType>, Device> : public TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> { typedef TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device> Base; typedef TensorShufflingOp<Shuffle, ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = false, PacketAccess = (internal::packet_traits<Scalar>::size > 1), RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) { return this->m_impl.coeffRef(this->srcCoeff(index)); } template <int StoreMode> EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; internal::pstore<CoeffReturnType, PacketReturnType>(values, x); for (int i = 0; i < PacketSize; ++i) { this->coeffRef(index+i) = values[i]; } } }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H
9,489
34.811321
112
h
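TensorShufflingOp backs TensorBase::shuffle(), which permutes dimensions lazily. A minimal sketch (include path assumed; sizes arbitrary):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 3> t(2, 3, 4);
  t.setRandom();

  // Output dimension i is input dimension shuffle[i], so the result is 4 x 2 x 3.
  Eigen::array<int, 3> shuffle{{2, 0, 1}};
  Eigen::Tensor<float, 3> s = t.shuffle(shuffle);

  std::cout << s.dimension(0) << " "         // 4
            << s.dimension(1) << " "         // 2
            << s.dimension(2) << std::endl;  // 3
  return 0;
}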
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H #define EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H namespace Eigen { /** \class TensorStriding * \ingroup CXX11_Tensor_Module * * \brief Tensor striding class. * * */ namespace internal { template<typename Strides, typename XprType> struct traits<TensorStridingOp<Strides, XprType> > : public traits<XprType> { typedef typename XprType::Scalar Scalar; typedef traits<XprType> XprTraits; typedef typename XprTraits::StorageKind StorageKind; typedef typename XprTraits::Index Index; typedef typename XprType::Nested Nested; typedef typename remove_reference<Nested>::type _Nested; static const int NumDimensions = XprTraits::NumDimensions; static const int Layout = XprTraits::Layout; }; template<typename Strides, typename XprType> struct eval<TensorStridingOp<Strides, XprType>, Eigen::Dense> { typedef const TensorStridingOp<Strides, XprType>& type; }; template<typename Strides, typename XprType> struct nested<TensorStridingOp<Strides, XprType>, 1, typename eval<TensorStridingOp<Strides, XprType> >::type> { typedef TensorStridingOp<Strides, XprType> type; }; } // end namespace internal template<typename Strides, typename XprType> class TensorStridingOp : public TensorBase<TensorStridingOp<Strides, XprType> > { public: typedef typename Eigen::internal::traits<TensorStridingOp>::Scalar Scalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename Eigen::internal::nested<TensorStridingOp>::type Nested; typedef typename Eigen::internal::traits<TensorStridingOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorStridingOp>::Index Index; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp(const XprType& expr, const Strides& dims) : m_xpr(expr), m_dims(dims) {} EIGEN_DEVICE_FUNC const Strides& strides() const { return m_dims; } EIGEN_DEVICE_FUNC const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp& operator = (const TensorStridingOp& other) { typedef TensorAssignOp<TensorStridingOp, const TensorStridingOp> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp& operator = (const OtherDerived& other) { typedef TensorAssignOp<TensorStridingOp, const OtherDerived> Assign; Assign assign(*this, other); internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice()); return *this; } protected: typename XprType::Nested m_xpr; const Strides m_dims; }; // Eval as rvalue template<typename Strides, typename ArgType, typename Device> struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device> { typedef TensorStridingOp<Strides, ArgType> XprType; typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; typedef DSizes<Index, NumDims> Dimensions; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType 
CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; enum { IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, // to be implemented RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_impl(op.expression(), device) { m_dimensions = m_impl.dimensions(); for (int i = 0; i < NumDims; ++i) { m_dimensions[i] = ceilf(static_cast<float>(m_dimensions[i]) / op.strides()[i]); } const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { m_outputStrides[0] = 1; m_inputStrides[0] = 1; for (int i = 1; i < NumDims; ++i) { m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; m_inputStrides[i-1] *= op.strides()[i-1]; } m_inputStrides[NumDims-1] *= op.strides()[NumDims-1]; } else { // RowMajor m_outputStrides[NumDims-1] = 1; m_inputStrides[NumDims-1] = 1; for (int i = NumDims - 2; i >= 0; --i) { m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; m_inputStrides[i+1] *= op.strides()[i+1]; } m_inputStrides[0] *= op.strides()[0]; } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) { m_impl.evalSubExprsIfNeeded(NULL); return true; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_impl.coeff(srcCoeff(index)); } template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); Index inputIndices[] = {0, 0}; Index indices[] = {index, index + PacketSize - 1}; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { const Index idx0 = indices[0] / m_outputStrides[i]; const Index idx1 = indices[1] / m_outputStrides[i]; inputIndices[0] += idx0 * m_inputStrides[i]; inputIndices[1] += idx1 * m_inputStrides[i]; indices[0] -= idx0 * m_outputStrides[i]; indices[1] -= idx1 * m_outputStrides[i]; } inputIndices[0] += indices[0] * m_inputStrides[0]; inputIndices[1] += indices[1] * m_inputStrides[0]; } else { // RowMajor for (int i = 0; i < NumDims - 1; ++i) { const Index idx0 = indices[0] / m_outputStrides[i]; const Index idx1 = indices[1] / m_outputStrides[i]; inputIndices[0] += idx0 * m_inputStrides[i]; inputIndices[1] += idx1 * m_inputStrides[i]; indices[0] -= idx0 * m_outputStrides[i]; indices[1] -= idx1 * m_outputStrides[i]; } inputIndices[0] += indices[0] * m_inputStrides[NumDims-1]; inputIndices[1] += indices[1] * m_inputStrides[NumDims-1]; } if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]); return rslt; } else { EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize]; values[0] = m_impl.coeff(inputIndices[0]); values[PacketSize-1] = m_impl.coeff(inputIndices[1]); for (int i = 1; 
i < PacketSize-1; ++i) { values[i] = coeff(index+i); } PacketReturnType rslt = internal::pload<PacketReturnType>(values); return rslt; } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { double compute_cost = (NumDims - 1) * (TensorOpCost::AddCost<Index>() + TensorOpCost::MulCost<Index>() + TensorOpCost::DivCost<Index>()) + TensorOpCost::MulCost<Index>(); if (vectorized) { compute_cost *= 2; // packet() computes two indices } const int innerDim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : (NumDims - 1); return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) + // Computation is not vectorized per se, but it is done once per packet. TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); } EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; } protected: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const { Index inputIndex = 0; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { const Index idx = index / m_outputStrides[i]; inputIndex += idx * m_inputStrides[i]; index -= idx * m_outputStrides[i]; } inputIndex += index * m_inputStrides[0]; } else { // RowMajor for (int i = 0; i < NumDims - 1; ++i) { const Index idx = index / m_outputStrides[i]; inputIndex += idx * m_inputStrides[i]; index -= idx * m_outputStrides[i]; } inputIndex += index * m_inputStrides[NumDims-1]; } return inputIndex; } Dimensions m_dimensions; array<Index, NumDims> m_outputStrides; array<Index, NumDims> m_inputStrides; TensorEvaluator<ArgType, Device> m_impl; }; // Eval as lvalue template<typename Strides, typename ArgType, typename Device> struct TensorEvaluator<TensorStridingOp<Strides, ArgType>, Device> : public TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device> { typedef TensorStridingOp<Strides, ArgType> XprType; typedef TensorEvaluator<const XprType, Device> Base; // typedef typename XprType::Index Index; static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value; // typedef DSizes<Index, NumDims> Dimensions; enum { IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, Layout = TensorEvaluator<ArgType, Device>::Layout, CoordAccess = false, // to be implemented RawAccess = false }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) { } typedef typename XprType::Index Index; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType; static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return this->m_impl.coeffRef(this->srcCoeff(index)); } template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x) { EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) eigen_assert(index+PacketSize-1 < this->dimensions().TotalSize()); Index inputIndices[] = {0, 0}; Index indices[] = {index, index + PacketSize - 1}; if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) { for (int i = NumDims - 1; i > 0; --i) { const Index idx0 = indices[0] / this->m_outputStrides[i]; const Index idx1 = indices[1] / this->m_outputStrides[i]; inputIndices[0] += idx0 * this->m_inputStrides[i]; inputIndices[1] += 
idx1 * this->m_inputStrides[i]; indices[0] -= idx0 * this->m_outputStrides[i]; indices[1] -= idx1 * this->m_outputStrides[i]; } inputIndices[0] += indices[0] * this->m_inputStrides[0]; inputIndices[1] += indices[1] * this->m_inputStrides[0]; } else { // RowMajor for (int i = 0; i < NumDims - 1; ++i) { const Index idx0 = indices[0] / this->m_outputStrides[i]; const Index idx1 = indices[1] / this->m_outputStrides[i]; inputIndices[0] += idx0 * this->m_inputStrides[i]; inputIndices[1] += idx1 * this->m_inputStrides[i]; indices[0] -= idx0 * this->m_outputStrides[i]; indices[1] -= idx1 * this->m_outputStrides[i]; } inputIndices[0] += indices[0] * this->m_inputStrides[NumDims-1]; inputIndices[1] += indices[1] * this->m_inputStrides[NumDims-1]; } if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { this->m_impl.template writePacket<Unaligned>(inputIndices[0], x); } else { EIGEN_ALIGN_MAX Scalar values[PacketSize]; internal::pstore<Scalar, PacketReturnType>(values, x); this->m_impl.coeffRef(inputIndices[0]) = values[0]; this->m_impl.coeffRef(inputIndices[1]) = values[PacketSize-1]; for (int i = 1; i < PacketSize-1; ++i) { this->coeffRef(index+i) = values[i]; } } } }; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H
13,196
37.929204
112
h
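TensorStridingOp backs TensorBase::stride(); each output dimension is ceil(input_dim / stride), matching the evaluator's ceilf() above. A small sketch (include path assumed; sizes arbitrary):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(4, 6);
  t.setRandom();

  // Keep every 2nd element along dim 0 and every 3rd along dim 1,
  // so s(i, j) == t(2 * i, 3 * j) and s is 2 x 2.
  Eigen::array<Eigen::Index, 2> strides{{2, 3}};
  Eigen::Tensor<float, 2> s = t.stride(strides);

  std::cout << s.dimension(0) << " x " << s.dimension(1) << std::endl;  // 2 x 2
  return 0;
}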
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorSycl.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Mehdi Goli Codeplay Software Ltd. // Ralph Potter Codeplay Software Ltd. // Luke Iwanski Codeplay Software Ltd. // Contact: [email protected] // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. // General include header of SYCL target for Tensor Module #ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_H #define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_H #ifdef EIGEN_USE_SYCL // global pointer to set different attribute state for a class template <class T> struct MakeGlobalPointer { typedef typename cl::sycl::global_ptr<T>::pointer_t Type; }; // global pointer to set different attribute state for a class template <class T> struct MakeLocalPointer { typedef typename cl::sycl::local_ptr<T>::pointer_t Type; }; namespace Eigen { namespace TensorSycl { namespace internal { /// This struct is used for special expression nodes with no operations (for example assign and selectOP). struct NoOP; template<bool IsConst, typename T> struct GetType{ typedef const T Type; }; template<typename T> struct GetType<false, T>{ typedef T Type; }; } } } // tuple construction #include "TensorSyclTuple.h" // counting number of leaf at compile time #include "TensorSyclLeafCount.h" // The index PlaceHolder takes the actual expression and replaces the actual // data on it with the place holder. It uses the same pre-order expression tree // traverse as the leaf count in order to give the right access number to each // node in the expression #include "TensorSyclPlaceHolderExpr.h" // creation of an accessor tuple from a tuple of SYCL buffers #include "TensorSyclExtractAccessor.h" // this is used to change the address space type in tensor map for GPU #include "TensorSyclConvertToDeviceExpression.h" // this is used to extract the functors #include "TensorSyclExtractFunctors.h" // this is used to create tensormap on the device // this is used to construct the expression on the device #include "TensorSyclExprConstructor.h" /// this is used for extracting tensor reduction #include "TensorReductionSycl.h" // kernel execution using fusion #include "TensorSyclRun.h" #endif // end of EIGEN_USE_SYCL #endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_H
2,446
28.481928
106
h
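Most of this umbrella header is SYCL-only plumbing, but the small GetType helper is plain C++. A standalone sketch of its behaviour; the helper is re-declared locally here so the snippet compiles without a SYCL toolchain:

#include <type_traits>

// Mirrors TensorSycl::internal::GetType: choose const T or T at compile time.
template <bool IsConst, typename T> struct GetType { typedef const T Type; };
template <typename T> struct GetType<false, T> { typedef T Type; };

static_assert(std::is_same<GetType<true, int>::Type, const int>::value,
              "true selects the const-qualified type");
static_assert(std::is_same<GetType<false, int>::Type, int>::value,
              "false leaves the type unqualified");

int main() { return 0; }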
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorSyclConvertToDeviceExpression.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Mehdi Goli Codeplay Software Ltd. // Ralph Potter Codeplay Software Ltd. // Luke Iwanski Codeplay Software Ltd. // Contact: <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /***************************************************************** * TensorSyclConvertToDeviceExpression.h * * \brief: * Conversion from host pointer to device pointer * inside leaf nodes of the expression. * *****************************************************************/ #ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_CONVERT_TO_DEVICE_EXPRESSION_HPP #define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_CONVERT_TO_DEVICE_EXPRESSION_HPP namespace Eigen { namespace TensorSycl { namespace internal { /// \struct ConvertToDeviceExpression /// \brief This struct is used to convert the MakePointer in the host expression /// to the MakeGlobalPointer for the device expression. For the leafNodes /// containing the pointer. This is due to the fact that the address space of /// the pointer T* is different on the host and the device. template <typename Expr> struct ConvertToDeviceExpression; template<template<class...> class NonOpCategory, bool IsConst, typename... Args> struct NonOpConversion{ typedef typename GetType<IsConst, NonOpCategory<typename ConvertToDeviceExpression<Args>::Type...> >::Type Type; }; template<template<class, template <class> class > class NonOpCategory, bool IsConst, typename Args> struct DeviceConvertor{ typedef typename GetType<IsConst, NonOpCategory<typename ConvertToDeviceExpression<Args>::Type, MakeGlobalPointer> >::Type Type; }; /// specialisation of the \ref ConvertToDeviceExpression struct when the node /// type is TensorMap #define TENSORMAPCONVERT(CVQual)\ template <typename Scalar_, int Options_, int Options2_, int NumIndices_, typename IndexType_, template <class> class MakePointer_>\ struct ConvertToDeviceExpression<CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakePointer_> > {\ typedef CVQual TensorMap<Tensor<Scalar_, NumIndices_, Options_, IndexType_>, Options2_, MakeGlobalPointer> Type;\ }; TENSORMAPCONVERT(const) TENSORMAPCONVERT() #undef TENSORMAPCONVERT /// specialisation of the \ref ConvertToDeviceExpression struct when the node /// type is TensorCwiseNullaryOp, TensorCwiseUnaryOp, TensorCwiseBinaryOp, TensorCwiseTernaryOp, TensorBroadcastingOp #define CATEGORYCONVERT(CVQual)\ template <template<class, class...> class Category, typename OP, typename... subExprs>\ struct ConvertToDeviceExpression<CVQual Category<OP, subExprs...> > {\ typedef CVQual Category<OP, typename ConvertToDeviceExpression<subExprs>::Type... 
> Type;\ }; CATEGORYCONVERT(const) CATEGORYCONVERT() #undef CATEGORYCONVERT /// specialisation of the \ref ConvertToDeviceExpression struct when the node /// type is TensorCwiseSelectOp #define SELECTOPCONVERT(CVQual, Res)\ template <typename IfExpr, typename ThenExpr, typename ElseExpr>\ struct ConvertToDeviceExpression<CVQual TensorSelectOp<IfExpr, ThenExpr, ElseExpr> >\ : NonOpConversion<TensorSelectOp, Res, IfExpr, ThenExpr, ElseExpr> {}; SELECTOPCONVERT(const, true) SELECTOPCONVERT(, false) #undef SELECTOPCONVERT /// specialisation of the \ref ConvertToDeviceExpression struct when the node /// type is const AssingOP #define ASSIGNCONVERT(CVQual, Res)\ template <typename LHSExpr, typename RHSExpr>\ struct ConvertToDeviceExpression<CVQual TensorAssignOp<LHSExpr, RHSExpr> >\ : NonOpConversion<TensorAssignOp, Res, LHSExpr, RHSExpr>{}; ASSIGNCONVERT(const, true) ASSIGNCONVERT(, false) #undef ASSIGNCONVERT /// specialisation of the \ref ConvertToDeviceExpression struct when the node /// type is either TensorForcedEvalOp or TensorEvalToOp #define KERNELBROKERCONVERT(CVQual, Res, ExprNode)\ template <typename Expr>\ struct ConvertToDeviceExpression<CVQual ExprNode<Expr> > \ : DeviceConvertor<ExprNode, Res, Expr>{}; KERNELBROKERCONVERT(const, true, TensorForcedEvalOp) KERNELBROKERCONVERT(, false, TensorForcedEvalOp) KERNELBROKERCONVERT(const, true, TensorEvalToOp) KERNELBROKERCONVERT(, false, TensorEvalToOp) #undef KERNELBROKERCONVERT /// specialisation of the \ref ConvertToDeviceExpression struct when the node type is TensorReductionOp #define KERNELBROKERCONVERTREDUCTION(CVQual)\ template <typename OP, typename Dim, typename subExpr, template <class> class MakePointer_>\ struct ConvertToDeviceExpression<CVQual TensorReductionOp<OP, Dim, subExpr, MakePointer_> > {\ typedef CVQual TensorReductionOp<OP, Dim, typename ConvertToDeviceExpression<subExpr>::Type, MakeGlobalPointer> Type;\ }; KERNELBROKERCONVERTREDUCTION(const) KERNELBROKERCONVERTREDUCTION() #undef KERNELBROKERCONVERTREDUCTION } // namespace internal } // namespace TensorSycl } // namespace Eigen #endif // UNSUPPORTED_EIGEN_CXX1
5,046
40.368852
132
h
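ConvertToDeviceExpression rewrites only the MakePointer policy of the leaf nodes while keeping the rest of the expression tree intact. The sketch below is a simplified, SYCL-free analogue of that rebinding pattern; all names in it are invented for illustration and are not part of Eigen:

#include <type_traits>

// Stand-ins for host and device pointer policies.
template <class T> struct MakeHostPointer   { typedef T* Type; };
template <class T> struct MakeDevicePointer { typedef T* Type; };  // would differ on a real device

// A leaf parameterized by a pointer policy, plus a unary wrapper node.
template <class T, template <class> class MakePointer> struct Leaf {};
template <class Op, class Sub> struct Unary {};

// The conversion trait: rebind the policy on every leaf, recurse through wrappers.
template <class Expr> struct ToDevice;
template <class T, template <class> class MP>
struct ToDevice<Leaf<T, MP> > { typedef Leaf<T, MakeDevicePointer> Type; };
template <class Op, class Sub>
struct ToDevice<Unary<Op, Sub> > { typedef Unary<Op, typename ToDevice<Sub>::Type> Type; };

struct Negate {};
typedef Unary<Negate, Leaf<float, MakeHostPointer> > HostExpr;
static_assert(std::is_same<ToDevice<HostExpr>::Type,
                           Unary<Negate, Leaf<float, MakeDevicePointer> > >::value,
              "only the leaf's pointer policy changes");

int main() { return 0; }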
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Mehdi Goli Codeplay Software Ltd. // Ralph Potter Codeplay Software Ltd. // Luke Iwanski Codeplay Software Ltd. // Contact: <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /***************************************************************** * TensroSyclTuple.h * * \brief: * Minimal implementation of std::tuple that can be used inside a SYCL kernel. * *****************************************************************/ #ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_TUPLE_HPP #define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_TUPLE_HPP namespace utility { namespace tuple { /// \struct StaticIf /// \brief The StaticIf struct is used to statically choose the type based on the /// condition. template <bool, typename T = void> struct StaticIf; /// \brief specialisation of the \ref StaticIf when the condition is true template <typename T> struct StaticIf<true, T> { typedef T type; }; /// \struct Tuple /// \brief is a fixed-size collection of heterogeneous values /// \ztparam Ts... - the types of the elements that the tuple stores. /// Empty list is supported. template <class... Ts> struct Tuple {}; /// \brief specialisation of the \ref Tuple class when the tuple has at least /// one element. /// \tparam T : the type of the first element in the tuple. /// \tparam Ts... the rest of the elements in the tuple. Ts... can be empty. template <class T, class... Ts> struct Tuple<T, Ts...> { Tuple(T t, Ts... ts) : head(t), tail(ts...) {} T head; Tuple<Ts...> tail; }; ///\ struct ElemTypeHolder /// \brief ElemTypeHolder class is used to specify the types of the /// elements inside the tuple /// \tparam size_t the number of elements inside the tuple /// \tparam class the tuple class template <size_t, class> struct ElemTypeHolder; /// \brief specialisation of the \ref ElemTypeHolder class when the number of /// elements inside the tuple is 1 template <class T, class... Ts> struct ElemTypeHolder<0, Tuple<T, Ts...> > { typedef T type; }; /// \brief specialisation of the \ref ElemTypeHolder class when the number of /// elements inside the tuple is bigger than 1. It recursively calls itself to /// detect the type of each element in the tuple /// \tparam T : the type of the first element in the tuple. /// \tparam Ts... the rest of the elements in the tuple. Ts... can be empty. /// \tparam K is the Kth element in the tuple template <size_t k, class T, class... Ts> struct ElemTypeHolder<k, Tuple<T, Ts...> > { typedef typename ElemTypeHolder<k - 1, Tuple<Ts...> >::type type; }; /// get /// \brief Extracts the first element from the tuple. /// K=0 represents the first element of the tuple. The tuple cannot be empty. /// \tparam Ts... are the type of the elements in the tuple. /// \param t is the tuple whose contents to extract /// \return typename ElemTypeHolder<0, Tuple<Ts...> >::type &>::type #define TERMINATE_CONDS_TUPLE_GET(CVQual) \ template <size_t k, class... 
Ts> \ typename StaticIf<k == 0, CVQual typename ElemTypeHolder<0, Tuple<Ts...> >::type &>::type \ get(CVQual Tuple<Ts...> &t) { \ static_assert(sizeof...(Ts)!=0, "The requseted value is bigger than the size of the tuple"); \ return t.head; \ } TERMINATE_CONDS_TUPLE_GET(const) TERMINATE_CONDS_TUPLE_GET() #undef TERMINATE_CONDS_TUPLE_GET /// get /// \brief Extracts the Kth element from the tuple. ///\tparam K is an integer value in [0,sizeof...(Types)). /// \tparam T is the (sizeof...(Types) -(K+1)) element in the tuple /// \tparam Ts... are the type of the elements in the tuple. /// \param t is the tuple whose contents to extract /// \return typename ElemTypeHolder<K, Tuple<Ts...> >::type &>::type #define RECURSIVE_TUPLE_GET(CVQual) \ template <size_t k, class T, class... Ts> \ typename StaticIf<k != 0, CVQual typename ElemTypeHolder<k, Tuple<T, Ts...> >::type &>::type \ get(CVQual Tuple<T, Ts...> &t) { \ return utility::tuple::get<k - 1>(t.tail); \ } RECURSIVE_TUPLE_GET(const) RECURSIVE_TUPLE_GET() #undef RECURSIVE_TUPLE_GET /// make_tuple /// \brief Creates a tuple object, deducing the target type from the types of /// arguments. /// \tparam Args the type of the arguments to construct the tuple from /// \param args zero or more arguments to construct the tuple from /// \return Tuple<Args...> template <typename... Args> Tuple<Args...> make_tuple(Args... args) { return Tuple<Args...>(args...); } /// size /// \brief Provides access to the number of elements in a tuple as a /// compile-time constant expression. /// \tparam Args the type of the arguments to construct the tuple from /// \return size_t template <typename... Args> static constexpr size_t size(Tuple<Args...> &) { return sizeof...(Args); } /// \struct IndexList /// \brief Creates a list of index from the elements in the tuple /// \tparam Is... a list of index from [0 to sizeof...(tuple elements)) template <size_t... Is> struct IndexList {}; /// \struct RangeBuilder /// \brief Collects internal details for generating index ranges [MIN, MAX) /// Declare primary template for index range builder /// \tparam MIN is the starting index in the tuple /// \tparam N represents sizeof..(elemens)- sizeof...(Is) /// \tparam Is... are the list of generated index so far template <size_t MIN, size_t N, size_t... Is> struct RangeBuilder; /// \brief base Step: Specialisation of the \ref RangeBuilder when the /// MIN==MAX. In this case the Is... is [0 to sizeof...(tuple elements)) /// \tparam MIN is the starting index of the tuple /// \tparam Is is [0 to sizeof...(tuple elements)) template <size_t MIN, size_t... Is> struct RangeBuilder<MIN, MIN, Is...> { typedef IndexList<Is...> type; }; /// Induction step: Specialisation of the RangeBuilder class when N!=MIN /// in this case we are recursively subtracting N by one and adding one /// index to Is... list until MIN==N /// \tparam MIN is the starting index in the tuple /// \tparam N represents sizeof..(elemens)- sizeof...(Is) /// \tparam Is... are the list of generated index so far template <size_t MIN, size_t N, size_t... Is> struct RangeBuilder : public RangeBuilder<MIN, N - 1, N - 1, Is...> {}; /// \brief IndexRange that returns a [MIN, MAX) index range /// \tparam MIN is the starting index in the tuple /// \tparam MAX is the size of the tuple template <size_t MIN, size_t MAX> struct IndexRange: RangeBuilder<MIN, MAX>::type {}; /// append_base /// \brief unpacking the elements of the input tuple t and creating a new tuple /// by adding element a at the end of it. ///\tparam Args... 
the type of the elements inside the tuple t /// \tparam T the type of the new element going to be added at the end of tuple /// \tparam I... is the list of index from [0 to sizeof...(t)) /// \param t the tuple on which we want to append a. /// \param a the new elements going to be added to the tuple /// \return Tuple<Args..., T> template <typename... Args, typename T, size_t... I> Tuple<Args..., T> append_base(Tuple<Args...> t, T a,IndexList<I...>) { return utility::tuple::make_tuple(get<I>(t)..., a); } /// append /// \brief the deduction function for \ref append_base that automatically /// generate the \ref IndexRange ///\tparam Args... the type of the elements inside the tuple t /// \tparam T the type of the new element going to be added at the end of tuple /// \param t the tuple on which we want to append a. /// \param a the new elements going to be added to the tuple /// \return Tuple<Args..., T> template <typename... Args, typename T> Tuple<Args..., T> append(Tuple<Args...> t, T a) { return utility::tuple::append_base(t, a, IndexRange<0, sizeof...(Args)>()); } /// append_base /// \brief This is a specialisation of \ref append_base when we want to /// concatenate /// tuple t2 at the end of the tuple t1. Here we unpack both tuples, generate the /// IndexRange for each of them and create an output tuple T that contains both /// elements of t1 and t2. ///\tparam Args1... the type of the elements inside the tuple t1 ///\tparam Args2... the type of the elements inside the tuple t2 /// \tparam I1... is the list of index from [0 to sizeof...(t1)) /// \tparam I2... is the list of index from [0 to sizeof...(t2)) /// \param t1 is the tuple on which we want to append t2. /// \param t2 is the tuple that is going to be added on t1. /// \return Tuple<Args1..., Args2...> template <typename... Args1, typename... Args2, size_t... I1, size_t... I2> Tuple<Args1..., Args2...> append_base(Tuple<Args1...> t1, Tuple<Args2...> t2, IndexList<I1...>, IndexList<I2...>) { return utility::tuple::make_tuple(get<I1>(t1)...,get<I2>(t2)...); } /// append /// \brief deduction function for \ref append_base when we are appending tuple /// t1 by tuple t2. In this case the \ref IndexRange for both tuple are /// automatically generated. ///\tparam Args1... the type of the elements inside the tuple t1 ///\tparam Args2... the type of the elements inside the tuple t2 /// \param t1 is the tuple on which we want to append t2. /// \param t2 is the tuple that is going to be added on t1. /// \return Tuple<Args1..., Args2...> template <typename... Args1, typename... Args2> Tuple<Args1..., Args2...> append(Tuple<Args1...> t1,Tuple<Args2...> t2) { return utility::tuple::append_base(t1, t2, IndexRange<0, sizeof...(Args1)>(), IndexRange<0, sizeof...(Args2)>()); } } // tuple } // utility #endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSORSYCL_TUPLE_HPP
9,630
39.982979
115
h
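A usage sketch for the minimal tuple above. It assumes the header can be included directly (it only needs size_t, pulled in via <cstddef> here) and is compiled as C++11; the values are illustrative:

#include <cstddef>  // size_t, which the header uses unqualified
#include <unsupported/Eigen/CXX11/src/Tensor/TensorSyclTuple.h>  // include path assumed
#include <iostream>

int main() {
  // Build a heterogeneous tuple and read elements back by index.
  auto t = utility::tuple::make_tuple(1, 2.5, 'x');
  std::cout << utility::tuple::get<0>(t) << " "        // 1
            << utility::tuple::get<1>(t) << " "        // 2.5
            << utility::tuple::get<2>(t) << std::endl; // x

  // append() returns a new, larger tuple; size() is a compile-time element count.
  auto t2 = utility::tuple::append(t, 42u);
  std::cout << utility::tuple::size(t2) << std::endl;  // 4
  return 0;
}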
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H #define EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H namespace Eigen { namespace internal { template<typename Scalar, int Options> class compute_tensor_flags { enum { is_dynamic_size_storage = 1, is_aligned = ( ((Options&DontAlign)==0) && ( #if EIGEN_MAX_STATIC_ALIGN_BYTES>0 (!is_dynamic_size_storage) #else 0 #endif | #if EIGEN_MAX_ALIGN_BYTES>0 is_dynamic_size_storage #else 0 #endif ) ), packet_access_bit = packet_traits<Scalar>::Vectorizable && is_aligned ? PacketAccessBit : 0 }; public: enum { ret = packet_access_bit }; }; template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_> struct traits<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > { typedef Scalar_ Scalar; typedef Dense StorageKind; typedef IndexType_ Index; static const int NumDimensions = NumIndices_; static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor; enum { Options = Options_, Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0 : LvalueBit) }; template <typename T> struct MakePointer { typedef T* Type; }; }; template<typename Scalar_, typename Dimensions, int Options_, typename IndexType_> struct traits<TensorFixedSize<Scalar_, Dimensions, Options_, IndexType_> > { typedef Scalar_ Scalar; typedef Dense StorageKind; typedef IndexType_ Index; static const int NumDimensions = array_size<Dimensions>::value; static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor; enum { Options = Options_, Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0: LvalueBit) }; template <typename T> struct MakePointer { typedef T* Type; }; }; template<typename PlainObjectType, int Options_, template <class> class MakePointer_> struct traits<TensorMap<PlainObjectType, Options_, MakePointer_> > : public traits<PlainObjectType> { typedef traits<PlainObjectType> BaseTraits; typedef typename BaseTraits::Scalar Scalar; typedef typename BaseTraits::StorageKind StorageKind; typedef typename BaseTraits::Index Index; static const int NumDimensions = BaseTraits::NumDimensions; static const int Layout = BaseTraits::Layout; enum { Options = Options_, Flags = BaseTraits::Flags }; template <class T> struct MakePointer { // Intermediate typedef to workaround MSVC issue. 
typedef MakePointer_<T> MakePointerT; typedef typename MakePointerT::Type Type; }; }; template<typename PlainObjectType> struct traits<TensorRef<PlainObjectType> > : public traits<PlainObjectType> { typedef traits<PlainObjectType> BaseTraits; typedef typename BaseTraits::Scalar Scalar; typedef typename BaseTraits::StorageKind StorageKind; typedef typename BaseTraits::Index Index; static const int NumDimensions = BaseTraits::NumDimensions; static const int Layout = BaseTraits::Layout; enum { Options = BaseTraits::Options, Flags = BaseTraits::Flags }; }; template<typename _Scalar, int NumIndices_, int Options, typename IndexType_> struct eval<Tensor<_Scalar, NumIndices_, Options, IndexType_>, Eigen::Dense> { typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>& type; }; template<typename _Scalar, int NumIndices_, int Options, typename IndexType_> struct eval<const Tensor<_Scalar, NumIndices_, Options, IndexType_>, Eigen::Dense> { typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>& type; }; template<typename Scalar_, typename Dimensions, int Options, typename IndexType_> struct eval<TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>, Eigen::Dense> { typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>& type; }; template<typename Scalar_, typename Dimensions, int Options, typename IndexType_> struct eval<const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>, Eigen::Dense> { typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>& type; }; template<typename PlainObjectType, int Options, template <class> class MakePointer> struct eval<TensorMap<PlainObjectType, Options, MakePointer>, Eigen::Dense> { typedef const TensorMap<PlainObjectType, Options, MakePointer>& type; }; template<typename PlainObjectType, int Options, template <class> class MakePointer> struct eval<const TensorMap<PlainObjectType, Options, MakePointer>, Eigen::Dense> { typedef const TensorMap<PlainObjectType, Options, MakePointer>& type; }; template<typename PlainObjectType> struct eval<TensorRef<PlainObjectType>, Eigen::Dense> { typedef const TensorRef<PlainObjectType>& type; }; template<typename PlainObjectType> struct eval<const TensorRef<PlainObjectType>, Eigen::Dense> { typedef const TensorRef<PlainObjectType>& type; }; // TODO nested<> does not exist anymore in Eigen/Core, and it thus has to be removed in favor of ref_selector. 
template<typename T, int n=1, typename PlainObject = void> struct nested { typedef typename ref_selector<T>::type type; }; template <typename Scalar_, int NumIndices_, int Options_, typename IndexType_> struct nested<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > { typedef const Tensor<Scalar_, NumIndices_, Options_, IndexType_>& type; }; template <typename Scalar_, int NumIndices_, int Options_, typename IndexType_> struct nested<const Tensor<Scalar_, NumIndices_, Options_, IndexType_> > { typedef const Tensor<Scalar_, NumIndices_, Options_, IndexType_>& type; }; template <typename Scalar_, typename Dimensions, int Options, typename IndexType_> struct nested<TensorFixedSize<Scalar_, Dimensions, Options, IndexType_> > { typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>& type; }; template <typename Scalar_, typename Dimensions, int Options, typename IndexType_> struct nested<const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_> > { typedef const TensorFixedSize<Scalar_, Dimensions, Options, IndexType_>& type; }; template <typename PlainObjectType, int Options, template <class> class MakePointer> struct nested<TensorMap<PlainObjectType, Options, MakePointer> > { typedef const TensorMap<PlainObjectType, Options, MakePointer>& type; }; template <typename PlainObjectType, int Options, template <class> class MakePointer> struct nested<const TensorMap<PlainObjectType, Options, MakePointer> > { typedef const TensorMap<PlainObjectType, Options, MakePointer>& type; }; template <typename PlainObjectType> struct nested<TensorRef<PlainObjectType> > { typedef const TensorRef<PlainObjectType>& type; }; template <typename PlainObjectType> struct nested<const TensorRef<PlainObjectType> > { typedef const TensorRef<PlainObjectType>& type; }; } // end namespace internal // Convolutional layers take in an input tensor of shape (D, R, C, B), or (D, C, // R, B), and convolve it with a set of filters, which can also be presented as // a tensor (D, K, K, M), where M is the number of filters, K is the filter // size, and each 3-dimensional tensor of size (D, K, K) is a filter. For // simplicity we assume that we always use square filters (which is usually the // case in images), hence the two Ks in the tensor dimension. It also takes in // a few additional parameters: // Stride (S): The convolution stride is the offset between locations where we // apply the filters. A larger stride means that the output will be // spatially smaller. // Padding (P): The padding we apply to the input tensor along the R and C // dimensions. This is usually used to make sure that the spatial // dimensions of the output matches our intention. // // Two types of padding are often used: // SAME: The pad value is computed so that the output will have size // R/S and C/S. // VALID: no padding is carried out. // When we do padding, the padded values at the padded locations are usually // zero. // // The output dimensions for convolution, when given all the parameters above, // are as follows: // When Padding = SAME: the output size is (B, R', C', M), where // R' = ceil(float(R) / float(S)) // C' = ceil(float(C) / float(S)) // where ceil is the ceiling function. The input tensor is padded with 0 as // needed. The number of padded rows and columns are computed as: // Pr = ((R' - 1) * S + K - R) / 2 // Pc = ((C' - 1) * S + K - C) / 2 // when the stride is 1, we have the simplified case R'=R, C'=C, Pr=Pc=(K-1)/2. // This is where SAME comes from - the output has the same size as the input has. 
// When Padding = VALID: the output size is computed as // R' = ceil(float(R - K + 1) / float(S)) // C' = ceil(float(C - K + 1) / float(S)) // and the number of padded rows and columns are computed in the same way as in // the SAME case. // When the stride is 1, we have the simplified case R'=R-K+1, C'=C-K+1, Pr=0, // Pc=0. typedef enum { PADDING_VALID = 1, PADDING_SAME = 2 } PaddingType; } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H
9,454
33.6337
110
h
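The SAME/VALID padding arithmetic described in the comments of this header is easy to sanity-check in isolation. The following standalone sketch (not part of Eigen; the input sizes are made-up) evaluates the output-size and padding formulas exactly as stated:

#include <cmath>
#include <cstdio>

int main() {
  const int R = 28, C = 28;  // input rows/cols (made-up sizes)
  const int K = 5;           // square filter size
  const int S = 2;           // stride

  // SAME: output is ceil(R/S) x ceil(C/S); the input is zero-padded as needed.
  const int Rp_same = static_cast<int>(std::ceil(float(R) / float(S)));
  const int Cp_same = static_cast<int>(std::ceil(float(C) / float(S)));
  const int Pr = ((Rp_same - 1) * S + K - R) / 2;
  const int Pc = ((Cp_same - 1) * S + K - C) / 2;

  // VALID: no padding, output is ceil((R-K+1)/S) x ceil((C-K+1)/S).
  const int Rp_valid = static_cast<int>(std::ceil(float(R - K + 1) / float(S)));
  const int Cp_valid = static_cast<int>(std::ceil(float(C - K + 1) / float(S)));

  std::printf("SAME : %d x %d output, pad rows=%d cols=%d\n", Rp_same, Cp_same, Pr, Pc);
  std::printf("VALID: %d x %d output\n", Rp_valid, Cp_valid);
}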
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2015 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_UINT128_H #define EIGEN_CXX11_TENSOR_TENSOR_UINT128_H namespace Eigen { namespace internal { template <uint64_t n> struct static_val { static const uint64_t value = n; EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator uint64_t() const { return n; } EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val() { } template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val(const T& v) { eigen_assert(v == n); } }; template <typename HIGH = uint64_t, typename LOW = uint64_t> struct TensorUInt128 { HIGH high; LOW low; template<typename OTHER_HIGH, typename OTHER_LOW> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorUInt128(const TensorUInt128<OTHER_HIGH, OTHER_LOW>& other) : high(other.high), low(other.low) { EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE); EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE); } template<typename OTHER_HIGH, typename OTHER_LOW> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorUInt128& operator = (const TensorUInt128<OTHER_HIGH, OTHER_LOW>& other) { EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE); EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE); high = other.high; low = other.low; return *this; } template<typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE explicit TensorUInt128(const T& x) : high(0), low(x) { eigen_assert((static_cast<typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type>(x) <= NumTraits<uint64_t>::highest())); eigen_assert(x >= 0); } EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorUInt128(HIGH y, LOW x) : high(y), low(x) { } EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator LOW() const { return low; } EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LOW lower() const { return low; } EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HIGH upper() const { return high; } }; template <typename HL, typename LL, typename HR, typename LR> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool operator == (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs) { return (lhs.high == rhs.high) & (lhs.low == rhs.low); } template <typename HL, typename LL, typename HR, typename LR> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool operator != (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs) { return (lhs.high != rhs.high) | (lhs.low != rhs.low); } template <typename HL, typename LL, typename HR, typename LR> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool operator >= (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs) { if (lhs.high != rhs.high) { return lhs.high > rhs.high; } return lhs.low >= rhs.low; } template <typename HL, typename LL, typename HR, typename LR> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool operator < (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs) { if (lhs.high != rhs.high) { return lhs.high < rhs.high; } return lhs.low < rhs.low; } template <typename HL, typename LL, typename HR, typename LR> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorUInt128<uint64_t, uint64_t> operator + (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs) { TensorUInt128<uint64_t, uint64_t> result(lhs.high + 
rhs.high, lhs.low + rhs.low); if (result.low < rhs.low) { result.high += 1; } return result; } template <typename HL, typename LL, typename HR, typename LR> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorUInt128<uint64_t, uint64_t> operator - (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs) { TensorUInt128<uint64_t, uint64_t> result(lhs.high - rhs.high, lhs.low - rhs.low); if (result.low > lhs.low) { result.high -= 1; } return result; } template <typename HL, typename LL, typename HR, typename LR> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorUInt128<uint64_t, uint64_t> operator * (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs) { // Split each 128-bit integer into 4 32-bit integers, and then do the // multiplications by hand as follow: // lhs a b c d // rhs e f g h // ----------- // ah bh ch dh // bg cg dg // cf df // de // The result is stored in 2 64bit integers, high and low. const uint64_t LOW = 0x00000000FFFFFFFFLL; const uint64_t HIGH = 0xFFFFFFFF00000000LL; uint64_t d = lhs.low & LOW; uint64_t c = (lhs.low & HIGH) >> 32LL; uint64_t b = lhs.high & LOW; uint64_t a = (lhs.high & HIGH) >> 32LL; uint64_t h = rhs.low & LOW; uint64_t g = (rhs.low & HIGH) >> 32LL; uint64_t f = rhs.high & LOW; uint64_t e = (rhs.high & HIGH) >> 32LL; // Compute the low 32 bits of low uint64_t acc = d * h; uint64_t low = acc & LOW; // Compute the high 32 bits of low. Add a carry every time we wrap around acc >>= 32LL; uint64_t carry = 0; uint64_t acc2 = acc + c * h; if (acc2 < acc) { carry++; } acc = acc2 + d * g; if (acc < acc2) { carry++; } low |= (acc << 32LL); // Carry forward the high bits of acc to initiate the computation of the // low 32 bits of high acc2 = (acc >> 32LL) | (carry << 32LL); carry = 0; acc = acc2 + b * h; if (acc < acc2) { carry++; } acc2 = acc + c * g; if (acc2 < acc) { carry++; } acc = acc2 + d * f; if (acc < acc2) { carry++; } uint64_t high = acc & LOW; // Start to compute the high 32 bits of high. acc2 = (acc >> 32LL) | (carry << 32LL); acc = acc2 + a * h; acc2 = acc + b * g; acc = acc2 + c * f; acc2 = acc + d * e; high |= (acc2 << 32LL); return TensorUInt128<uint64_t, uint64_t>(high, low); } template <typename HL, typename LL, typename HR, typename LR> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorUInt128<uint64_t, uint64_t> operator / (const TensorUInt128<HL, LL>& lhs, const TensorUInt128<HR, LR>& rhs) { if (rhs == TensorUInt128<static_val<0>, static_val<1> >(1)) { return TensorUInt128<uint64_t, uint64_t>(lhs.high, lhs.low); } else if (lhs < rhs) { return TensorUInt128<uint64_t, uint64_t>(0); } else { // calculate the biggest power of 2 times rhs that's less than or equal to lhs TensorUInt128<uint64_t, uint64_t> power2(1); TensorUInt128<uint64_t, uint64_t> d(rhs); TensorUInt128<uint64_t, uint64_t> tmp(lhs - d); while (lhs >= d) { tmp = tmp - d; d = d + d; power2 = power2 + power2; } tmp = TensorUInt128<uint64_t, uint64_t>(lhs.high, lhs.low); TensorUInt128<uint64_t, uint64_t> result(0); while (power2 != TensorUInt128<static_val<0>, static_val<0> >(0)) { if (tmp >= d) { tmp = tmp - d; result = result + power2; } // Shift right power2 = TensorUInt128<uint64_t, uint64_t>(power2.high >> 1, (power2.low >> 1) | (power2.high << 63)); d = TensorUInt128<uint64_t, uint64_t>(d.high >> 1, (d.low >> 1) | (d.high << 63)); } return result; } } } // namespace internal } // namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_UINT128_H
7,522
29.212851
133
h
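A quick way to convince oneself that the schoolbook 32-bit-limb multiplication in TensorUInt128 is right is to compare it with a native 128-bit integer. The sketch below assumes the unsupported CXX11 Tensor module is on the include path and a GCC/Clang compiler that provides unsigned __int128; the operand values are arbitrary:

#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <cstdint>

int main() {
  using Eigen::internal::TensorUInt128;

  const uint64_t a_hi = 0x1234u, a_lo = 0xDEADBEEFCAFEBABEull;
  const uint64_t b_hi = 0u,      b_lo = 0x0000000100000007ull;

  TensorUInt128<uint64_t, uint64_t> a(a_hi, a_lo), b(b_hi, b_lo);
  TensorUInt128<uint64_t, uint64_t> p = a * b;

  // Reference result using the compiler's native 128-bit integer.
  unsigned __int128 ra = ((unsigned __int128)a_hi << 64) | a_lo;
  unsigned __int128 rb = ((unsigned __int128)b_hi << 64) | b_lo;
  unsigned __int128 rp = ra * rb;  // wraps modulo 2^128, like TensorUInt128

  assert(p.upper() == (uint64_t)(rp >> 64));
  assert(p.lower() == (uint64_t)rp);
  return 0;
}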
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Dmitry Vyukov <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ #define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ namespace Eigen { // EventCount allows to wait for arbitrary predicates in non-blocking // algorithms. Think of condition variable, but wait predicate does not need to // be protected by a mutex. Usage: // Waiting thread does: // // if (predicate) // return act(); // EventCount::Waiter& w = waiters[my_index]; // ec.Prewait(&w); // if (predicate) { // ec.CancelWait(&w); // return act(); // } // ec.CommitWait(&w); // // Notifying thread does: // // predicate = true; // ec.Notify(true); // // Notify is cheap if there are no waiting threads. Prewait/CommitWait are not // cheap, but they are executed only if the preceeding predicate check has // failed. // // Algorihtm outline: // There are two main variables: predicate (managed by user) and state_. // Operation closely resembles Dekker mutual algorithm: // https://en.wikipedia.org/wiki/Dekker%27s_algorithm // Waiting thread sets state_ then checks predicate, Notifying thread sets // predicate then checks state_. Due to seq_cst fences in between these // operations it is guaranteed than either waiter will see predicate change // and won't block, or notifying thread will see state_ change and will unblock // the waiter, or both. But it can't happen that both threads don't see each // other changes, which would lead to deadlock. class EventCount { public: class Waiter; EventCount(MaxSizeVector<Waiter>& waiters) : waiters_(waiters) { eigen_assert(waiters.size() < (1 << kWaiterBits) - 1); // Initialize epoch to something close to overflow to test overflow. state_ = kStackMask | (kEpochMask - kEpochInc * waiters.size() * 2); } ~EventCount() { // Ensure there are no waiters. eigen_assert((state_.load() & (kStackMask | kWaiterMask)) == kStackMask); } // Prewait prepares for waiting. // After calling this function the thread must re-check the wait predicate // and call either CancelWait or CommitWait passing the same Waiter object. void Prewait(Waiter* w) { w->epoch = state_.fetch_add(kWaiterInc, std::memory_order_relaxed); std::atomic_thread_fence(std::memory_order_seq_cst); } // CommitWait commits waiting. void CommitWait(Waiter* w) { w->state = Waiter::kNotSignaled; // Modification epoch of this waiter. uint64_t epoch = (w->epoch & kEpochMask) + (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift); uint64_t state = state_.load(std::memory_order_seq_cst); for (;;) { if (int64_t((state & kEpochMask) - epoch) < 0) { // The preceeding waiter has not decided on its fate. Wait until it // calls either CancelWait or CommitWait, or is notified. EIGEN_THREAD_YIELD(); state = state_.load(std::memory_order_seq_cst); continue; } // We've already been notified. if (int64_t((state & kEpochMask) - epoch) > 0) return; // Remove this thread from prewait counter and add it to the waiter list. 
eigen_assert((state & kWaiterMask) != 0); uint64_t newstate = state - kWaiterInc + kEpochInc; newstate = (newstate & ~kStackMask) | (w - &waiters_[0]); if ((state & kStackMask) == kStackMask) w->next.store(nullptr, std::memory_order_relaxed); else w->next.store(&waiters_[state & kStackMask], std::memory_order_relaxed); if (state_.compare_exchange_weak(state, newstate, std::memory_order_release)) break; } Park(w); } // CancelWait cancels effects of the previous Prewait call. void CancelWait(Waiter* w) { uint64_t epoch = (w->epoch & kEpochMask) + (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift); uint64_t state = state_.load(std::memory_order_relaxed); for (;;) { if (int64_t((state & kEpochMask) - epoch) < 0) { // The preceeding waiter has not decided on its fate. Wait until it // calls either CancelWait or CommitWait, or is notified. EIGEN_THREAD_YIELD(); state = state_.load(std::memory_order_relaxed); continue; } // We've already been notified. if (int64_t((state & kEpochMask) - epoch) > 0) return; // Remove this thread from prewait counter. eigen_assert((state & kWaiterMask) != 0); if (state_.compare_exchange_weak(state, state - kWaiterInc + kEpochInc, std::memory_order_relaxed)) return; } } // Notify wakes one or all waiting threads. // Must be called after changing the associated wait predicate. void Notify(bool all) { std::atomic_thread_fence(std::memory_order_seq_cst); uint64_t state = state_.load(std::memory_order_acquire); for (;;) { // Easy case: no waiters. if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0) return; uint64_t waiters = (state & kWaiterMask) >> kWaiterShift; uint64_t newstate; if (all) { // Reset prewait counter and empty wait list. newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask; } else if (waiters) { // There is a thread in pre-wait state, unblock it. newstate = state + kEpochInc - kWaiterInc; } else { // Pop a waiter from list and unpark it. Waiter* w = &waiters_[state & kStackMask]; Waiter* wnext = w->next.load(std::memory_order_relaxed); uint64_t next = kStackMask; if (wnext != nullptr) next = wnext - &waiters_[0]; // Note: we don't add kEpochInc here. ABA problem on the lock-free stack // can't happen because a waiter is re-pushed onto the stack only after // it was in the pre-wait state which inevitably leads to epoch // increment. newstate = (state & kEpochMask) + next; } if (state_.compare_exchange_weak(state, newstate, std::memory_order_acquire)) { if (!all && waiters) return; // unblocked pre-wait thread if ((state & kStackMask) == kStackMask) return; Waiter* w = &waiters_[state & kStackMask]; if (!all) w->next.store(nullptr, std::memory_order_relaxed); Unpark(w); return; } } } class Waiter { friend class EventCount; // Align to 128 byte boundary to prevent false sharing with other Waiter objects in the same vector. EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic<Waiter*> next; std::mutex mu; std::condition_variable cv; uint64_t epoch; unsigned state; enum { kNotSignaled, kWaiting, kSignaled, }; }; private: // State_ layout: // - low kStackBits is a stack of waiters committed wait. // - next kWaiterBits is count of waiters in prewait state. // - next kEpochBits is modification counter. 
static const uint64_t kStackBits = 16; static const uint64_t kStackMask = (1ull << kStackBits) - 1; static const uint64_t kWaiterBits = 16; static const uint64_t kWaiterShift = 16; static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) << kWaiterShift; static const uint64_t kWaiterInc = 1ull << kWaiterBits; static const uint64_t kEpochBits = 32; static const uint64_t kEpochShift = 32; static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift; static const uint64_t kEpochInc = 1ull << kEpochShift; std::atomic<uint64_t> state_; MaxSizeVector<Waiter>& waiters_; void Park(Waiter* w) { std::unique_lock<std::mutex> lock(w->mu); while (w->state != Waiter::kSignaled) { w->state = Waiter::kWaiting; w->cv.wait(lock); } } void Unpark(Waiter* waiters) { Waiter* next = nullptr; for (Waiter* w = waiters; w; w = next) { next = w->next.load(std::memory_order_relaxed); unsigned state; { std::unique_lock<std::mutex> lock(w->mu); state = w->state; w->state = Waiter::kSignaled; } // Avoid notifying if it wasn't waiting. if (state == Waiter::kWaiting) w->cv.notify_one(); } } EventCount(const EventCount&) = delete; void operator=(const EventCount&) = delete; }; } // namespace Eigen #endif // EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
8,699
36.179487
104
h
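The waiting protocol spelled out in the EventCount header comment (check the predicate, Prewait, re-check, then either CancelWait or CommitWait) looks like this in a minimal two-thread sketch. It assumes the unsupported CXX11 ThreadPool module is available and mirrors the two-step MaxSizeVector setup the pools use:

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <atomic>
#include <thread>
#include <cstdio>

int main() {
  Eigen::MaxSizeVector<Eigen::EventCount::Waiter> waiters(1);
  waiters.resize(1);  // same reserve-then-resize pattern as the thread pools
  Eigen::EventCount ec(waiters);
  std::atomic<bool> predicate(false);

  std::thread waiting_thread([&]() {
    Eigen::EventCount::Waiter* w = &waiters[0];
    if (predicate.load()) return;                    // fast path: no wait needed
    ec.Prewait(w);
    if (predicate.load()) { ec.CancelWait(w); return; }
    ec.CommitWait(w);                                // blocks until notified
  });

  std::thread notifying_thread([&]() {
    predicate = true;                                // publish the predicate first...
    ec.Notify(false);                                // ...then wake one waiter
  });

  waiting_thread.join();
  notifying_thread.join();
  std::printf("woken, predicate=%d\n", (int)predicate.load());
}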
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Dmitry Vyukov <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H #define EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H namespace Eigen { template <typename Environment> class NonBlockingThreadPoolTempl : public Eigen::ThreadPoolInterface { public: typedef typename Environment::Task Task; typedef RunQueue<Task, 1024> Queue; NonBlockingThreadPoolTempl(int num_threads, Environment env = Environment()) : env_(env), threads_(num_threads), queues_(num_threads), coprimes_(num_threads), waiters_(num_threads), blocked_(0), spinning_(0), done_(false), ec_(waiters_) { waiters_.resize(num_threads); // Calculate coprimes of num_threads. // Coprimes are used for a random walk over all threads in Steal // and NonEmptyQueueIndex. Iteration is based on the fact that if we take // a walk starting thread index t and calculate num_threads - 1 subsequent // indices as (t + coprime) % num_threads, we will cover all threads without // repetitions (effectively getting a presudo-random permutation of thread // indices). for (int i = 1; i <= num_threads; i++) { unsigned a = i; unsigned b = num_threads; // If GCD(a, b) == 1, then a and b are coprimes. while (b != 0) { unsigned tmp = a; a = b; b = tmp % b; } if (a == 1) { coprimes_.push_back(i); } } for (int i = 0; i < num_threads; i++) { queues_.push_back(new Queue()); } for (int i = 0; i < num_threads; i++) { threads_.push_back(env_.CreateThread([this, i]() { WorkerLoop(i); })); } } ~NonBlockingThreadPoolTempl() { done_ = true; // Now if all threads block without work, they will start exiting. // But note that threads can continue to work arbitrary long, // block, submit new work, unblock and otherwise live full life. ec_.Notify(true); // Join threads explicitly to avoid destruction order issues. for (size_t i = 0; i < threads_.size(); i++) delete threads_[i]; for (size_t i = 0; i < threads_.size(); i++) delete queues_[i]; } void Schedule(std::function<void()> fn) { Task t = env_.CreateTask(std::move(fn)); PerThread* pt = GetPerThread(); if (pt->pool == this) { // Worker thread of this pool, push onto the thread's queue. Queue* q = queues_[pt->thread_id]; t = q->PushFront(std::move(t)); } else { // A free-standing thread (or worker of another pool), push onto a random // queue. Queue* q = queues_[Rand(&pt->rand) % queues_.size()]; t = q->PushBack(std::move(t)); } // Note: below we touch this after making w available to worker threads. // Strictly speaking, this can lead to a racy-use-after-free. Consider that // Schedule is called from a thread that is neither main thread nor a worker // thread of this pool. Then, execution of w directly or indirectly // completes overall computations, which in turn leads to destruction of // this. We expect that such scenario is prevented by program, that is, // this is kept alive while any threads can potentially be in Schedule. if (!t.f) ec_.Notify(false); else env_.ExecuteTask(t); // Push failed, execute directly. 
} int NumThreads() const final { return static_cast<int>(threads_.size()); } int CurrentThreadId() const final { const PerThread* pt = const_cast<NonBlockingThreadPoolTempl*>(this)->GetPerThread(); if (pt->pool == this) { return pt->thread_id; } else { return -1; } } private: typedef typename Environment::EnvThread Thread; struct PerThread { constexpr PerThread() : pool(NULL), rand(0), thread_id(-1) { } NonBlockingThreadPoolTempl* pool; // Parent pool, or null for normal threads. uint64_t rand; // Random generator state. int thread_id; // Worker thread index in pool. }; Environment env_; MaxSizeVector<Thread*> threads_; MaxSizeVector<Queue*> queues_; MaxSizeVector<unsigned> coprimes_; MaxSizeVector<EventCount::Waiter> waiters_; std::atomic<unsigned> blocked_; std::atomic<bool> spinning_; std::atomic<bool> done_; EventCount ec_; // Main worker thread loop. void WorkerLoop(int thread_id) { PerThread* pt = GetPerThread(); pt->pool = this; pt->rand = std::hash<std::thread::id>()(std::this_thread::get_id()); pt->thread_id = thread_id; Queue* q = queues_[thread_id]; EventCount::Waiter* waiter = &waiters_[thread_id]; for (;;) { Task t = q->PopFront(); if (!t.f) { t = Steal(); if (!t.f) { // Leave one thread spinning. This reduces latency. // TODO(dvyukov): 1000 iterations is based on fair dice roll, tune it. // Also, the time it takes to attempt to steal work 1000 times depends // on the size of the thread pool. However the speed at which the user // of the thread pool submit tasks is independent of the size of the // pool. Consider a time based limit instead. if (!spinning_ && !spinning_.exchange(true)) { for (int i = 0; i < 1000 && !t.f; i++) { t = Steal(); } spinning_ = false; } if (!t.f) { if (!WaitForWork(waiter, &t)) { return; } } } } if (t.f) { env_.ExecuteTask(t); } } } // Steal tries to steal work from other worker threads in best-effort manner. Task Steal() { PerThread* pt = GetPerThread(); const size_t size = queues_.size(); unsigned r = Rand(&pt->rand); unsigned inc = coprimes_[r % coprimes_.size()]; unsigned victim = r % size; for (unsigned i = 0; i < size; i++) { Task t = queues_[victim]->PopBack(); if (t.f) { return t; } victim += inc; if (victim >= size) { victim -= size; } } return Task(); } // WaitForWork blocks until new work is available (returns true), or if it is // time to exit (returns false). Can optionally return a task to execute in t // (in such case t.f != nullptr on return). bool WaitForWork(EventCount::Waiter* waiter, Task* t) { eigen_assert(!t->f); // We already did best-effort emptiness check in Steal, so prepare for // blocking. ec_.Prewait(waiter); // Now do a reliable emptiness check. int victim = NonEmptyQueueIndex(); if (victim != -1) { ec_.CancelWait(waiter); *t = queues_[victim]->PopBack(); return true; } // Number of blocked threads is used as termination condition. // If we are shutting down and all worker threads blocked without work, // that's we are done. blocked_++; if (done_ && blocked_ == threads_.size()) { ec_.CancelWait(waiter); // Almost done, but need to re-check queues. // Consider that all queues are empty and all worker threads are preempted // right after incrementing blocked_ above. Now a free-standing thread // submits work and calls destructor (which sets done_). If we don't // re-check queues, we will exit leaving the work unexecuted. if (NonEmptyQueueIndex() != -1) { // Note: we must not pop from queues before we decrement blocked_, // otherwise the following scenario is possible. 
Consider that instead // of checking for emptiness we popped the only element from queues. // Now other worker threads can start exiting, which is bad if the // work item submits other work. So we just check emptiness here, // which ensures that all worker threads exit at the same time. blocked_--; return true; } // Reached stable termination state. ec_.Notify(true); return false; } ec_.CommitWait(waiter); blocked_--; return true; } int NonEmptyQueueIndex() { PerThread* pt = GetPerThread(); const size_t size = queues_.size(); unsigned r = Rand(&pt->rand); unsigned inc = coprimes_[r % coprimes_.size()]; unsigned victim = r % size; for (unsigned i = 0; i < size; i++) { if (!queues_[victim]->Empty()) { return victim; } victim += inc; if (victim >= size) { victim -= size; } } return -1; } static EIGEN_STRONG_INLINE PerThread* GetPerThread() { EIGEN_THREAD_LOCAL PerThread per_thread_; PerThread* pt = &per_thread_; return pt; } static EIGEN_STRONG_INLINE unsigned Rand(uint64_t* state) { uint64_t current = *state; // Update the internal state *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL; // Generate the random output (using the PCG-XSH-RS scheme) return static_cast<unsigned>((current ^ (current >> 22)) >> (22 + (current >> 61))); } }; typedef NonBlockingThreadPoolTempl<StlThreadEnvironment> NonBlockingThreadPool; } // namespace Eigen #endif // EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H
9,419
33.254545
88
h
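Typical use of the work-stealing pool is just Schedule plus some external completion signal, since the pool itself offers no wait-for-all call. A minimal sketch (unsupported CXX11 ThreadPool module assumed; the atomic-counter polling is only illustrative):

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <atomic>
#include <chrono>
#include <thread>
#include <cstdio>

int main() {
  Eigen::NonBlockingThreadPool pool(/*num_threads=*/4);

  const int kTasks = 32;
  std::atomic<int> remaining(kTasks);

  for (int i = 0; i < kTasks; ++i) {
    pool.Schedule([&remaining]() {
      // ... per-task work would go here ...
      remaining.fetch_sub(1, std::memory_order_acq_rel);
    });
  }

  // Crude completion wait; real code would use a latch or condition variable.
  while (remaining.load(std::memory_order_acquire) != 0)
    std::this_thread::sleep_for(std::chrono::milliseconds(1));

  std::printf("ran %d tasks on %d threads\n", kTasks, pool.NumThreads());
}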
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Dmitry Vyukov <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_ #define EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_ namespace Eigen { // RunQueue is a fixed-size, partially non-blocking deque or Work items. // Operations on front of the queue must be done by a single thread (owner), // operations on back of the queue can be done by multiple threads concurrently. // // Algorithm outline: // All remote threads operating on the queue back are serialized by a mutex. // This ensures that at most two threads access state: owner and one remote // thread (Size aside). The algorithm ensures that the occupied region of the // underlying array is logically continuous (can wraparound, but no stray // occupied elements). Owner operates on one end of this region, remote thread // operates on the other end. Synchronization between these threads // (potential consumption of the last element and take up of the last empty // element) happens by means of state variable in each element. States are: // empty, busy (in process of insertion of removal) and ready. Threads claim // elements (empty->busy and ready->busy transitions) by means of a CAS // operation. The finishing transition (busy->empty and busy->ready) are done // with plain store as the element is exclusively owned by the current thread. // // Note: we could permit only pointers as elements, then we would not need // separate state variable as null/non-null pointer value would serve as state, // but that would require malloc/free per operation for large, complex values // (and this is designed to store std::function<()>). template <typename Work, unsigned kSize> class RunQueue { public: RunQueue() : front_(0), back_(0) { // require power-of-two for fast masking eigen_assert((kSize & (kSize - 1)) == 0); eigen_assert(kSize > 2); // why would you do this? eigen_assert(kSize <= (64 << 10)); // leave enough space for counter for (unsigned i = 0; i < kSize; i++) array_[i].state.store(kEmpty, std::memory_order_relaxed); } ~RunQueue() { eigen_assert(Size() == 0); } // PushFront inserts w at the beginning of the queue. // If queue is full returns w, otherwise returns default-constructed Work. Work PushFront(Work w) { unsigned front = front_.load(std::memory_order_relaxed); Elem* e = &array_[front & kMask]; uint8_t s = e->state.load(std::memory_order_relaxed); if (s != kEmpty || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return w; front_.store(front + 1 + (kSize << 1), std::memory_order_relaxed); e->w = std::move(w); e->state.store(kReady, std::memory_order_release); return Work(); } // PopFront removes and returns the first element in the queue. // If the queue was empty returns default-constructed Work. Work PopFront() { unsigned front = front_.load(std::memory_order_relaxed); Elem* e = &array_[(front - 1) & kMask]; uint8_t s = e->state.load(std::memory_order_relaxed); if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return Work(); Work w = std::move(e->w); e->state.store(kEmpty, std::memory_order_release); front = ((front - 1) & kMask2) | (front & ~kMask2); front_.store(front, std::memory_order_relaxed); return w; } // PushBack adds w at the end of the queue. 
// If queue is full returns w, otherwise returns default-constructed Work. Work PushBack(Work w) { std::unique_lock<std::mutex> lock(mutex_); unsigned back = back_.load(std::memory_order_relaxed); Elem* e = &array_[(back - 1) & kMask]; uint8_t s = e->state.load(std::memory_order_relaxed); if (s != kEmpty || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return w; back = ((back - 1) & kMask2) | (back & ~kMask2); back_.store(back, std::memory_order_relaxed); e->w = std::move(w); e->state.store(kReady, std::memory_order_release); return Work(); } // PopBack removes and returns the last elements in the queue. // Can fail spuriously. Work PopBack() { if (Empty()) return Work(); std::unique_lock<std::mutex> lock(mutex_, std::try_to_lock); if (!lock) return Work(); unsigned back = back_.load(std::memory_order_relaxed); Elem* e = &array_[back & kMask]; uint8_t s = e->state.load(std::memory_order_relaxed); if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return Work(); Work w = std::move(e->w); e->state.store(kEmpty, std::memory_order_release); back_.store(back + 1 + (kSize << 1), std::memory_order_relaxed); return w; } // PopBackHalf removes and returns half last elements in the queue. // Returns number of elements removed. But can also fail spuriously. unsigned PopBackHalf(std::vector<Work>* result) { if (Empty()) return 0; std::unique_lock<std::mutex> lock(mutex_, std::try_to_lock); if (!lock) return 0; unsigned back = back_.load(std::memory_order_relaxed); unsigned size = Size(); unsigned mid = back; if (size > 1) mid = back + (size - 1) / 2; unsigned n = 0; unsigned start = 0; for (; static_cast<int>(mid - back) >= 0; mid--) { Elem* e = &array_[mid & kMask]; uint8_t s = e->state.load(std::memory_order_relaxed); if (n == 0) { if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) continue; start = mid; } else { // Note: no need to store temporal kBusy, we exclusively own these // elements. eigen_assert(s == kReady); } result->push_back(std::move(e->w)); e->state.store(kEmpty, std::memory_order_release); n++; } if (n != 0) back_.store(start + 1 + (kSize << 1), std::memory_order_relaxed); return n; } // Size returns current queue size. // Can be called by any thread at any time. unsigned Size() const { // Emptiness plays critical role in thread pool blocking. So we go to great // effort to not produce false positives (claim non-empty queue as empty). for (;;) { // Capture a consistent snapshot of front/tail. unsigned front = front_.load(std::memory_order_acquire); unsigned back = back_.load(std::memory_order_acquire); unsigned front1 = front_.load(std::memory_order_relaxed); if (front != front1) continue; int size = (front & kMask2) - (back & kMask2); // Fix overflow. if (size < 0) size += 2 * kSize; // Order of modification in push/pop is crafted to make the queue look // larger than it is during concurrent modifications. E.g. pop can // decrement size before the corresponding push has incremented it. // So the computed size can be up to kSize + 1, fix it. if (size > static_cast<int>(kSize)) size = kSize; return size; } } // Empty tests whether container is empty. // Can be called by any thread at any time. 
bool Empty() const { return Size() == 0; } private: static const unsigned kMask = kSize - 1; static const unsigned kMask2 = (kSize << 1) - 1; struct Elem { std::atomic<uint8_t> state; Work w; }; enum { kEmpty, kBusy, kReady, }; std::mutex mutex_; // Low log(kSize) + 1 bits in front_ and back_ contain rolling index of // front/back, respectively. The remaining bits contain modification counters // that are incremented on Push operations. This allows us to (1) distinguish // between empty and full conditions (if we would use log(kSize) bits for // position, these conditions would be indistinguishable); (2) obtain // consistent snapshot of front_/back_ for Size operation using the // modification counters. std::atomic<unsigned> front_; std::atomic<unsigned> back_; Elem array_[kSize]; RunQueue(const RunQueue&) = delete; void operator=(const RunQueue&) = delete; }; } // namespace Eigen #endif // EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_
8,444
39.023697
80
h
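The owner/thief split described in the RunQueue comment can be exercised directly: the owning thread uses PushFront/PopFront while another thread concurrently calls PopBack. A small sketch, assuming the unsupported CXX11 ThreadPool module and using plain ints as Work items (0, the default-constructed value, doubles as the empty marker):

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <thread>
#include <cstdio>

int main() {
  Eigen::RunQueue<int, 64> q;  // kSize must be a power of two greater than 2

  // Owner side: push work items 1..8 at the front.
  for (int i = 1; i <= 8; ++i) {
    int rejected = q.PushFront(i);
    if (rejected != 0) std::printf("queue full, item %d rejected\n", rejected);
  }

  // Thief side: concurrently steal from the back until it looks empty.
  std::thread thief([&q]() {
    for (;;) {
      int w = q.PopBack();  // default Work (0) means empty or a lost race
      if (w == 0) break;
      std::printf("stole %d\n", w);
    }
  });

  // Owner keeps consuming from the front in parallel.
  for (;;) {
    int w = q.PopFront();
    if (w == 0) break;
    std::printf("owner ran %d\n", w);
  }

  thief.join();
  std::printf("remaining items: %u\n", q.Size());
}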
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/ThreadPool/SimpleThreadPool.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H #define EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H namespace Eigen { // The implementation of the ThreadPool type ensures that the Schedule method // runs the functions it is provided in FIFO order when the scheduling is done // by a single thread. // Environment provides a way to create threads and also allows to intercept // task submission and execution. template <typename Environment> class SimpleThreadPoolTempl : public ThreadPoolInterface { public: // Construct a pool that contains "num_threads" threads. explicit SimpleThreadPoolTempl(int num_threads, Environment env = Environment()) : env_(env), threads_(num_threads), waiters_(num_threads) { for (int i = 0; i < num_threads; i++) { threads_.push_back(env.CreateThread([this, i]() { WorkerLoop(i); })); } } // Wait until all scheduled work has finished and then destroy the // set of threads. ~SimpleThreadPoolTempl() { { // Wait for all work to get done. std::unique_lock<std::mutex> l(mu_); while (!pending_.empty()) { empty_.wait(l); } exiting_ = true; // Wakeup all waiters. for (auto w : waiters_) { w->ready = true; w->task.f = nullptr; w->cv.notify_one(); } } // Wait for threads to finish. for (auto t : threads_) { delete t; } } // Schedule fn() for execution in the pool of threads. The functions are // executed in the order in which they are scheduled. void Schedule(std::function<void()> fn) final { Task t = env_.CreateTask(std::move(fn)); std::unique_lock<std::mutex> l(mu_); if (waiters_.empty()) { pending_.push_back(std::move(t)); } else { Waiter* w = waiters_.back(); waiters_.pop_back(); w->ready = true; w->task = std::move(t); w->cv.notify_one(); } } int NumThreads() const final { return static_cast<int>(threads_.size()); } int CurrentThreadId() const final { const PerThread* pt = this->GetPerThread(); if (pt->pool == this) { return pt->thread_id; } else { return -1; } } protected: void WorkerLoop(int thread_id) { std::unique_lock<std::mutex> l(mu_); PerThread* pt = GetPerThread(); pt->pool = this; pt->thread_id = thread_id; Waiter w; Task t; while (!exiting_) { if (pending_.empty()) { // Wait for work to be assigned to me w.ready = false; waiters_.push_back(&w); while (!w.ready) { w.cv.wait(l); } t = w.task; w.task.f = nullptr; } else { // Pick up pending work t = std::move(pending_.front()); pending_.pop_front(); if (pending_.empty()) { empty_.notify_all(); } } if (t.f) { mu_.unlock(); env_.ExecuteTask(t); t.f = nullptr; mu_.lock(); } } } private: typedef typename Environment::Task Task; typedef typename Environment::EnvThread Thread; struct Waiter { std::condition_variable cv; Task task; bool ready; }; struct PerThread { constexpr PerThread() : pool(NULL), thread_id(-1) { } SimpleThreadPoolTempl* pool; // Parent pool, or null for normal threads. int thread_id; // Worker thread index in pool. }; Environment env_; std::mutex mu_; MaxSizeVector<Thread*> threads_; // All threads MaxSizeVector<Waiter*> waiters_; // Stack of waiting threads. 
std::deque<Task> pending_; // Queue of pending work std::condition_variable empty_; // Signaled on pending_.empty() bool exiting_ = false; PerThread* GetPerThread() const { EIGEN_THREAD_LOCAL PerThread per_thread; return &per_thread; } }; typedef SimpleThreadPoolTempl<StlThreadEnvironment> SimpleThreadPool; } // namespace Eigen #endif // EIGEN_CXX11_THREADPOOL_SIMPLE_THREAD_POOL_H
4,323
26.896774
82
h
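Because the SimpleThreadPool destructor waits for pending work and a single worker executes tasks strictly in scheduling order, the FIFO guarantee can be demonstrated in a few lines (unsupported CXX11 ThreadPool module assumed):

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <vector>
#include <cstdio>

int main() {
  std::vector<int> order;
  {
    Eigen::SimpleThreadPool pool(/*num_threads=*/1);
    for (int i = 0; i < 5; ++i)
      pool.Schedule([i, &order]() { order.push_back(i); });
    // Leaving this scope waits for pending work and joins the worker.
  }
  for (int v : order) std::printf("%d ", v);  // prints: 0 1 2 3 4
  std::printf("\n");
}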
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H
#define EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H

namespace Eigen {

struct StlThreadEnvironment {
  struct Task {
    std::function<void()> f;
  };

  // EnvThread constructor must start the thread,
  // destructor must join the thread.
  class EnvThread {
   public:
    EnvThread(std::function<void()> f) : thr_(std::move(f)) {}
    ~EnvThread() { thr_.join(); }

   private:
    std::thread thr_;
  };

  EnvThread* CreateThread(std::function<void()> f) { return new EnvThread(std::move(f)); }
  Task CreateTask(std::function<void()> f) { return Task{std::move(f)}; }
  void ExecuteTask(const Task& t) { t.f(); }
};

} // namespace Eigen

#endif // EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H
1,120
27.74359
90
h
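The Environment parameter is the customization point of both pool templates: anything that supplies Task, EnvThread, CreateThread, CreateTask and ExecuteTask can be plugged in. Below is a sketch of a hypothetical CountingEnvironment that reuses StlThreadEnvironment's threading but counts executed tasks; the counter is static because the pool copies its environment by value:

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <atomic>
#include <cstdio>

// Reuses StlThreadEnvironment's threads and tasks, but counts executions.
struct CountingEnvironment : Eigen::StlThreadEnvironment {
  static std::atomic<int> executed;
  void ExecuteTask(const Task& t) {
    executed.fetch_add(1, std::memory_order_relaxed);
    t.f();
  }
};
std::atomic<int> CountingEnvironment::executed(0);

int main() {
  {
    Eigen::SimpleThreadPoolTempl<CountingEnvironment> pool(2);
    for (int i = 0; i < 10; ++i) pool.Schedule([] { /* work */ });
  }  // destructor drains the queue and joins the workers
  std::printf("executed %d tasks\n", CountingEnvironment::executed.load());
}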
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
#define EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H

// Try to come up with a portable implementation of thread local variables
#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7)
#define EIGEN_THREAD_LOCAL static __thread
#elif EIGEN_COMP_CLANG
#define EIGEN_THREAD_LOCAL static __thread
#else
#define EIGEN_THREAD_LOCAL static thread_local
#endif

#endif // EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H
801
33.869565
74
h
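EIGEN_THREAD_LOCAL already contains the static keyword, so it is used as a drop-in storage specifier for function-local per-thread state, exactly as GetPerThread() does in the pools. A tiny sketch (ThreadPool module assumed for the macro):

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <thread>
#include <cstdio>

static int bump() {
  EIGEN_THREAD_LOCAL int counter = 0;  // expands to static thread_local (or static __thread)
  return ++counter;
}

int main() {
  std::thread a([] { bump(); bump(); bump(); std::printf("a: %d\n", bump()); });  // prints a: 4
  std::thread b([] { std::printf("b: %d\n", bump()); });                          // prints b: 1
  a.join();
  b.join();
}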
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H
#define EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H

namespace Eigen {

// This defines an interface that ThreadPoolDevice can take to use
// custom thread pools underneath.
class ThreadPoolInterface {
 public:
  virtual void Schedule(std::function<void()> fn) = 0;

  // Returns the number of threads in the pool.
  virtual int NumThreads() const = 0;

  // Returns a logical thread index between 0 and NumThreads() - 1 if called
  // from one of the threads in the pool. Returns -1 otherwise.
  virtual int CurrentThreadId() const = 0;

  virtual ~ThreadPoolInterface() {}
};

} // namespace Eigen

#endif // EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H
1,084
30.911765
76
h
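Any object implementing the three virtual methods can stand in for a real pool wherever a ThreadPoolInterface& is expected, which is handy in tests. A sketch with a hypothetical InlinePool that runs every task on the calling thread (ThreadPool module assumed):

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <functional>
#include <cstdio>

// Runs every scheduled task immediately on the calling thread.
class InlinePool : public Eigen::ThreadPoolInterface {
 public:
  void Schedule(std::function<void()> fn) final { fn(); }
  int NumThreads() const final { return 1; }
  int CurrentThreadId() const final { return 0; }
};

static void run_on(Eigen::ThreadPoolInterface& pool) {
  pool.Schedule([] { std::printf("task ran\n"); });
}

int main() {
  InlinePool inline_pool;
  Eigen::NonBlockingThreadPool real_pool(2);
  run_on(inline_pool);  // executes right here, synchronously
  run_on(real_pool);    // executes on a worker; the pool destructor joins it
}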
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
#define EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H

// Try to come up with a portable way to yield
#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7)
#define EIGEN_THREAD_YIELD() sched_yield()
#else
#define EIGEN_THREAD_YIELD() std::this_thread::yield()
#endif

#endif // EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H
715
33.095238
69
h
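EIGEN_THREAD_YIELD() is meant for bounded spin-waits such as the one in CommitWait. A minimal sketch (ThreadPool module assumed for the macro):

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <atomic>
#include <thread>

int main() {
  std::atomic<bool> ready(false);
  std::thread producer([&ready] { ready.store(true, std::memory_order_release); });
  // Spin politely: give up the CPU between polls instead of burning it.
  while (!ready.load(std::memory_order_acquire))
    EIGEN_THREAD_YIELD();  // sched_yield() on old GCC, std::this_thread::yield() otherwise
  producer.join();
  return 0;
}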
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/util/EmulateCXX11Meta.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_EMULATE_CXX11_META_H #define EIGEN_EMULATE_CXX11_META_H namespace Eigen { namespace internal { /** \internal * \file CXX11/util/EmulateCXX11Meta.h * This file emulates a subset of the functionality provided by CXXMeta.h for * compilers that don't yet support cxx11 such as nvcc. */ struct empty_list { static const std::size_t count = 0; }; template<typename T, typename Tail=empty_list> struct type_list { typedef T HeadType; typedef Tail TailType; static const T head; static const Tail tail; static const std::size_t count = 1 + Tail::count; }; struct null_type { }; template<typename T1 = null_type, typename T2 = null_type, typename T3 = null_type, typename T4 = null_type, typename T5 = null_type, typename T6 = null_type, typename T7 = null_type, typename T8 = null_type> struct make_type_list { typedef typename make_type_list<T2, T3, T4, T5, T6, T7, T8>::type tailresult; typedef type_list<T1, tailresult> type; }; template<> struct make_type_list<> { typedef empty_list type; }; template <std::size_t index, class TList> struct get_type; template <class Head, class Tail> struct get_type<0, type_list<Head, Tail> > { typedef Head type; }; template <std::size_t i, class Head, class Tail> struct get_type<i, type_list<Head, Tail> > { typedef typename get_type<i-1, Tail>::type type; }; /* numeric list */ template <typename T, T n> struct type2val { typedef T type; static const T value = n; }; template<typename T, size_t n, T V> struct gen_numeric_list_repeated; template<typename T, T V> struct gen_numeric_list_repeated<T, 1, V> { typedef typename make_type_list<type2val<T, V> >::type type; }; template<typename T, T V> struct gen_numeric_list_repeated<T, 2, V> { typedef typename make_type_list<type2val<T, V>, type2val<T, V> >::type type; }; template<typename T, T V> struct gen_numeric_list_repeated<T, 3, V> { typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type; }; template<typename T, T V> struct gen_numeric_list_repeated<T, 4, V> { typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type; }; template<typename T, T V> struct gen_numeric_list_repeated<T, 5, V> { typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type; }; template<typename T, T V> struct gen_numeric_list_repeated<T, 6, V> { typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type; }; template<typename T, T V> struct gen_numeric_list_repeated<T, 7, V> { typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type; }; template<typename T, T V> struct gen_numeric_list_repeated<T, 8, V> { typedef typename make_type_list<type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V>, type2val<T, V> >::type type; }; template <std::size_t index, class NList> struct get; template <std::size_t i> struct get<i, empty_list> { get() { eigen_assert(false && "index overflow"); } typedef void type; static const 
char value = '\0'; }; template <std::size_t i, class Head> struct get<i, type_list<Head, empty_list> > { get() { eigen_assert(false && "index overflow"); } typedef void type; static const char value = '\0'; }; template <class Head> struct get<0, type_list<Head, empty_list> > { typedef typename Head::type type; static const type value = Head::value; }; template <class Head, class Tail> struct get<0, type_list<Head, Tail> > { typedef typename Head::type type; static const type value = Head::value; }; template <std::size_t i, class Head, class Tail> struct get<i, type_list<Head, Tail> > { typedef typename Tail::HeadType::type type; static const type value = get<i-1, Tail>::value; }; template <class NList> struct arg_prod { static const typename NList::HeadType::type value = get<0, NList>::value * arg_prod<typename NList::TailType>::value; }; template <> struct arg_prod<empty_list> { static const int value = 1; }; template<int n, typename t> array<t, n> repeat(t v) { array<t, n> array; array.fill(v); return array; } template<std::size_t I, class Head, class Tail> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Head::type array_get(type_list<Head, Tail>&) { return get<I, type_list<Head, Tail> >::value; } template<std::size_t I, class Head, class Tail> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Head::type array_get(const type_list<Head, Tail>&) { return get<I, type_list<Head, Tail> >::value; } template <class NList> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NList::HeadType::type array_prod(const NList&) { return arg_prod<NList>::value; } template<typename t, std::size_t n> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, n>& a) { t prod = 1; for (size_t i = 0; i < n; ++i) { prod *= a[i]; } return prod; } template<typename t> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const array<t, 0>& /*a*/) { return 0; } template<typename t> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const std::vector<t>& a) { eigen_assert(a.size() > 0); t prod = 1; for (size_t i = 0; i < a.size(); ++i) { prod *= a[i]; } return prod; } template<std::size_t I, class T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(std::vector<T>& a) { return a[I]; } template<std::size_t I, class T> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const std::vector<T>& a) { return a[I]; } struct sum_op { template<typename A, typename B> static inline bool run(A a, B b) { return a + b; } }; struct product_op { template<typename A, typename B> static inline bool run(A a, B b) { return a * b; } }; struct logical_and_op { template<typename A, typename B> static inline bool run(A a, B b) { return a && b; } }; struct logical_or_op { template<typename A, typename B> static inline bool run(A a, B b) { return a || b; } }; struct equal_op { template<typename A, typename B> static inline bool run(A a, B b) { return a == b; } }; struct not_equal_op { template<typename A, typename B> static inline bool run(A a, B b) { return a != b; } }; struct lesser_op { template<typename A, typename B> static inline bool run(A a, B b) { return a < b; } }; struct lesser_equal_op { template<typename A, typename B> static inline bool run(A a, B b) { return a <= b; } }; struct greater_op { template<typename A, typename B> static inline bool run(A a, B b) { return a > b; } }; struct greater_equal_op { template<typename A, typename B> static inline bool run(A a, B b) { return a >= b; } }; struct not_op { template<typename A> static inline bool run(A a) { return !a; } }; struct negation_op { template<typename A> static inline 
bool run(A a) { return -a; } }; struct greater_equal_zero_op { template<typename A> static inline bool run(A a) { return a >= 0; } }; template<typename Reducer, typename Op, typename A, std::size_t N> struct ArrayApplyAndReduce { static inline bool run(const array<A, N>& a) { EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE); bool result = Reducer::run(Op::run(a[0]), Op::run(a[1])); for (size_t i = 2; i < N; ++i) { result = Reducer::run(result, Op::run(a[i])); } return result; } }; template<typename Reducer, typename Op, typename A> struct ArrayApplyAndReduce<Reducer, Op, A, 1> { static inline bool run(const array<A, 1>& a) { return Op::run(a[0]); } }; template<typename Reducer, typename Op, typename A, std::size_t N> inline bool array_apply_and_reduce(const array<A, N>& a) { return ArrayApplyAndReduce<Reducer, Op, A, N>::run(a); } template<typename Reducer, typename Op, typename A, typename B, std::size_t N> struct ArrayZipAndReduce { static inline bool run(const array<A, N>& a, const array<B, N>& b) { EIGEN_STATIC_ASSERT(N >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE); bool result = Reducer::run(Op::run(a[0], b[0]), Op::run(a[1], b[1])); for (size_t i = 2; i < N; ++i) { result = Reducer::run(result, Op::run(a[i], b[i])); } return result; } }; template<typename Reducer, typename Op, typename A, typename B> struct ArrayZipAndReduce<Reducer, Op, A, B, 1> { static inline bool run(const array<A, 1>& a, const array<B, 1>& b) { return Op::run(a[0], b[0]); } }; template<typename Reducer, typename Op, typename A, typename B, std::size_t N> inline bool array_zip_and_reduce(const array<A, N>& a, const array<B, N>& b) { return ArrayZipAndReduce<Reducer, Op, A, B, N>::run(a, b); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_EMULATE_CXX11_META_H
9,377
29.057692
126
h
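The emulation layer builds lists by recursion on a head/tail pair and indexes them by peeling the tail. The standalone sketch below deliberately does not include the Eigen header (which header gets selected depends on the compiler); it re-creates the same type_list/get pattern in a few lines, using C++11 static_assert purely for brevity:

#include <cstddef>
#include <type_traits>

struct empty_list { static const std::size_t count = 0; };

template <typename T, typename Tail = empty_list>
struct type_list {
  typedef T HeadType;
  typedef Tail TailType;
  static const std::size_t count = 1 + Tail::count;
};

// get<i, List>: peel the tail i times, then expose the head's type.
template <std::size_t i, class List> struct get;
template <class Head, class Tail>
struct get<0, type_list<Head, Tail> > { typedef Head type; };
template <std::size_t i, class Head, class Tail>
struct get<i, type_list<Head, Tail> > { typedef typename get<i - 1, Tail>::type type; };

typedef type_list<int, type_list<float, type_list<double> > > my_list;

int main() {
  static_assert(my_list::count == 3, "three elements");
  static_assert(std::is_same<get<2, my_list>::type, double>::value, "third element is double");
}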
abess
abess-master/python/include/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_FIXEDSIZEVECTOR_H #define EIGEN_FIXEDSIZEVECTOR_H namespace Eigen { /** \class MaxSizeVector * \ingroup Core * * \brief The MaxSizeVector class. * * The %MaxSizeVector provides a subset of std::vector functionality. * * The goal is to provide basic std::vector operations when using * std::vector is not an option (e.g. on GPU or when compiling using * FMA/AVX, as this can cause either compilation failures or illegal * instruction failures). * * Beware: The constructors are not API compatible with these of * std::vector. */ template <typename T> class MaxSizeVector { public: // Construct a new MaxSizeVector, reserve n elements. EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit MaxSizeVector(size_t n) : reserve_(n), size_(0), data_(static_cast<T*>(internal::aligned_malloc(n * sizeof(T)))) { for (size_t i = 0; i < n; ++i) { new (&data_[i]) T; } } // Construct a new MaxSizeVector, reserve and resize to n. // Copy the init value to all elements. EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE MaxSizeVector(size_t n, const T& init) : reserve_(n), size_(n), data_(static_cast<T*>(internal::aligned_malloc(n * sizeof(T)))) { for (size_t i = 0; i < n; ++i) { new (&data_[i]) T(init); } } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~MaxSizeVector() { for (size_t i = 0; i < size_; ++i) { data_[i].~T(); } internal::aligned_free(data_); } void resize(size_t n) { eigen_assert(n <= reserve_); for (size_t i = size_; i < n; ++i) { new (&data_[i]) T; } for (size_t i = n; i < size_; ++i) { data_[i].~T(); } size_ = n; } // Append new elements (up to reserved size). EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void push_back(const T& t) { eigen_assert(size_ < reserve_); data_[size_++] = t; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& operator[] (size_t i) const { eigen_assert(i < size_); return data_[i]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& operator[] (size_t i) { eigen_assert(i < size_); return data_[i]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& back() { eigen_assert(size_ > 0); return data_[size_ - 1]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& back() const { eigen_assert(size_ > 0); return data_[size_ - 1]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pop_back() { // NOTE: This does not destroy the value at the end the way // std::vector's version of pop_back() does. That happens when // the Vector is destroyed. eigen_assert(size_ > 0); size_--; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t size() const { return size_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool empty() const { return size_ == 0; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* data() { return data_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T* data() const { return data_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* begin() { return data_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* end() { return data_ + size_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T* begin() const { return data_; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T* end() const { return data_ + size_; } private: size_t reserve_; size_t size_; T* data_; }; } // namespace Eigen #endif // EIGEN_FIXEDSIZEVECTOR_H
3,760
25.485915
73
h
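MaxSizeVector usage mirrors std::vector except that capacity is fixed at construction and exceeding it trips an eigen_assert rather than reallocating. A short sketch, assuming the unsupported CXX11 ThreadPool module (which ships this header):

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <cstdio>

int main() {
  Eigen::MaxSizeVector<int> v(4);  // capacity for at most 4 elements
  for (int i = 0; i < 4; ++i) v.push_back(i * i);

  std::printf("size=%zu back=%d\n", v.size(), v.back());
  for (const int* it = v.begin(); it != v.end(); ++it) std::printf("%d ", *it);
  std::printf("\n");

  v.pop_back();   // shrinks the logical size; storage is untouched
  v.resize(2);    // destroys trailing elements down to 2
  std::printf("after resize: size=%zu\n", v.size());
}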
abess
abess-master/python/include/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2009 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_AUTODIFF_JACOBIAN_H #define EIGEN_AUTODIFF_JACOBIAN_H namespace Eigen { template<typename Functor> class AutoDiffJacobian : public Functor { public: AutoDiffJacobian() : Functor() {} AutoDiffJacobian(const Functor& f) : Functor(f) {} // forward constructors #if EIGEN_HAS_VARIADIC_TEMPLATES template<typename... T> AutoDiffJacobian(const T& ...Values) : Functor(Values...) {} #else template<typename T0> AutoDiffJacobian(const T0& a0) : Functor(a0) {} template<typename T0, typename T1> AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} template<typename T0, typename T1, typename T2> AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {} #endif typedef typename Functor::InputType InputType; typedef typename Functor::ValueType ValueType; typedef typename ValueType::Scalar Scalar; enum { InputsAtCompileTime = InputType::RowsAtCompileTime, ValuesAtCompileTime = ValueType::RowsAtCompileTime }; typedef Matrix<Scalar, ValuesAtCompileTime, InputsAtCompileTime> JacobianType; typedef typename JacobianType::Index Index; typedef Matrix<Scalar, InputsAtCompileTime, 1> DerivativeType; typedef AutoDiffScalar<DerivativeType> ActiveScalar; typedef Matrix<ActiveScalar, InputsAtCompileTime, 1> ActiveInput; typedef Matrix<ActiveScalar, ValuesAtCompileTime, 1> ActiveValue; #if EIGEN_HAS_VARIADIC_TEMPLATES // Some compilers don't accept variadic parameters after a default parameter, // i.e., we can't just write _jac=0 but we need to overload operator(): EIGEN_STRONG_INLINE void operator() (const InputType& x, ValueType* v) const { this->operator()(x, v, 0); } template<typename... ParamsType> void operator() (const InputType& x, ValueType* v, JacobianType* _jac, const ParamsType&... Params) const #else void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const #endif { eigen_assert(v!=0); if (!_jac) { #if EIGEN_HAS_VARIADIC_TEMPLATES Functor::operator()(x, v, Params...); #else Functor::operator()(x, v); #endif return; } JacobianType& jac = *_jac; ActiveInput ax = x.template cast<ActiveScalar>(); ActiveValue av(jac.rows()); if(InputsAtCompileTime==Dynamic) for (Index j=0; j<jac.rows(); j++) av[j].derivatives().resize(x.rows()); for (Index i=0; i<jac.cols(); i++) ax[i].derivatives() = DerivativeType::Unit(x.rows(),i); #if EIGEN_HAS_VARIADIC_TEMPLATES Functor::operator()(ax, &av, Params...); #else Functor::operator()(ax, &av); #endif for (Index i=0; i<jac.rows(); i++) { (*v)[i] = av[i].value(); jac.row(i) = av[i].derivatives(); } } }; } #endif // EIGEN_AUTODIFF_JACOBIAN_H
3,150
27.908257
85
h
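A small, hypothetical functor wired through AutoDiffJacobian, to illustrate how the record above is typically used; the functor name and numbers are made up for illustration, and <unsupported/Eigen/AutoDiff> is assumed to provide AutoDiffScalar/AutoDiffJacobian as in stock Eigen.

#include <iostream>
#include <Eigen/Core>
#include <unsupported/Eigen/AutoDiff>

// Hypothetical functor: f(x) = (x0*x1, x0+x1), written the way AutoDiffJacobian expects.
struct MyFunc {
  typedef Eigen::Vector2d InputType;
  typedef Eigen::Vector2d ValueType;
  template <typename T>
  void operator()(const Eigen::Matrix<T, 2, 1>& x, Eigen::Matrix<T, 2, 1>* v) const {
    (*v)(0) = x(0) * x(1);
    (*v)(1) = x(0) + x(1);
  }
};

int main() {
  Eigen::AutoDiffJacobian<MyFunc> f;
  Eigen::Vector2d x(1.0, 2.0), val;
  Eigen::Matrix2d jac;
  f(x, &val, &jac);          // value and Jacobian in one pass
  std::cout << jac << "\n";  // expected: [2 1; 1 1]
}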
abess
abess-master/python/include/unsupported/Eigen/src/FFT/ei_fftw_impl.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Mark Borgerding mark a borgerding net
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

namespace Eigen {

namespace internal {

  // FFTW uses non-const arguments
  // so we must use ugly const_cast calls for all the args it uses
  //
  // This should be safe as long as
  // 1. we use FFTW_ESTIMATE for all our planning
  //       see the FFTW docs section 4.3.2 "Planner Flags"
  // 2. fftw_complex is compatible with std::complex
  //    This assumes std::complex<T> layout is array of size 2 with real,imag
  template <typename T>
  inline T * fftw_cast(const T* p)
  {
      return const_cast<T*>( p);
  }

  inline fftw_complex * fftw_cast( const std::complex<double> * p)
  {
      return const_cast<fftw_complex*>( reinterpret_cast<const fftw_complex*>(p) );
  }

  inline fftwf_complex * fftw_cast( const std::complex<float> * p)
  {
      return const_cast<fftwf_complex*>( reinterpret_cast<const fftwf_complex*>(p) );
  }

  inline fftwl_complex * fftw_cast( const std::complex<long double> * p)
  {
      return const_cast<fftwl_complex*>( reinterpret_cast<const fftwl_complex*>(p) );
  }

  template <typename T>
  struct fftw_plan {};

  template <>
  struct fftw_plan<float>
  {
      typedef float scalar_type;
      typedef fftwf_complex complex_type;
      fftwf_plan m_plan;
      fftw_plan() :m_plan(NULL) {}
      ~fftw_plan() {if (m_plan) fftwf_destroy_plan(m_plan);}

      inline void fwd(complex_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwf_execute_dft( m_plan, src,dst);
      }
      inline void inv(complex_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwf_execute_dft( m_plan, src,dst);
      }
      inline void fwd(complex_type * dst,scalar_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftwf_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwf_execute_dft_r2c( m_plan,src,dst);
      }
      inline void inv(scalar_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftwf_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwf_execute_dft_c2r( m_plan, src,dst);
      }

      inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
          if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwf_execute_dft( m_plan, src,dst);
      }
      inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
          if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwf_execute_dft( m_plan, src,dst);
      }
  };

  template <>
  struct fftw_plan<double>
  {
      typedef double scalar_type;
      typedef fftw_complex complex_type;
      ::fftw_plan m_plan;
      fftw_plan() :m_plan(NULL) {}
      ~fftw_plan() {if (m_plan) fftw_destroy_plan(m_plan);}

      inline void fwd(complex_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftw_execute_dft( m_plan, src,dst);
      }
      inline void inv(complex_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftw_execute_dft( m_plan, src,dst);
      }
      inline void fwd(complex_type * dst,scalar_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftw_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftw_execute_dft_r2c( m_plan,src,dst);
      }
      inline void inv(scalar_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftw_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftw_execute_dft_c2r( m_plan, src,dst);
      }
      inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
          if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftw_execute_dft( m_plan, src,dst);
      }
      inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
          if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftw_execute_dft( m_plan, src,dst);
      }
  };

  template <>
  struct fftw_plan<long double>
  {
      typedef long double scalar_type;
      typedef fftwl_complex complex_type;
      fftwl_plan m_plan;
      fftw_plan() :m_plan(NULL) {}
      ~fftw_plan() {if (m_plan) fftwl_destroy_plan(m_plan);}

      inline void fwd(complex_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwl_execute_dft( m_plan, src,dst);
      }
      inline void inv(complex_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwl_execute_dft( m_plan, src,dst);
      }
      inline void fwd(complex_type * dst,scalar_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftwl_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwl_execute_dft_r2c( m_plan,src,dst);
      }
      inline void inv(scalar_type * dst,complex_type * src,int nfft) {
          if (m_plan==NULL) m_plan = fftwl_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwl_execute_dft_c2r( m_plan, src,dst);
      }
      inline void fwd2( complex_type * dst,complex_type * src,int n0,int n1) {
          if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwl_execute_dft( m_plan, src,dst);
      }
      inline void inv2( complex_type * dst,complex_type * src,int n0,int n1) {
          if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT);
          fftwl_execute_dft( m_plan, src,dst);
      }
  };

  template <typename _Scalar>
  struct fftw_impl
  {
      typedef _Scalar Scalar;
      typedef std::complex<Scalar> Complex;

      inline void clear()
      {
        m_plans.clear();
      }

      // complex-to-complex forward FFT
      inline void fwd( Complex * dst,const Complex *src,int nfft)
      {
        get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src),nfft );
      }

      // real-to-complex forward FFT
      inline void fwd( Complex * dst,const Scalar * src,int nfft)
      {
          get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src) ,nfft);
      }

      // 2-d complex-to-complex
      inline void fwd2(Complex * dst, const Complex * src, int n0,int n1)
      {
          get_plan(n0,n1,false,dst,src).fwd2(fftw_cast(dst), fftw_cast(src) ,n0,n1);
      }

      // inverse complex-to-complex
      inline void inv(Complex * dst,const Complex *src,int nfft)
      {
        get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft );
      }

      // half-complex to scalar
      inline void inv( Scalar * dst,const Complex * src,int nfft)
      {
        get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft );
      }

      // 2-d complex-to-complex
      inline void inv2(Complex * dst, const Complex * src, int n0,int n1)
      {
        get_plan(n0,n1,true,dst,src).inv2(fftw_cast(dst), fftw_cast(src) ,n0,n1);
      }

  protected:
      typedef fftw_plan<Scalar> PlanData;

      typedef std::map<int64_t,PlanData> PlanMap;

      PlanMap m_plans;

      inline PlanData & get_plan(int nfft,bool inverse,void * dst,const void * src)
      {
          bool inplace = (dst==src);
          bool aligned = ( (reinterpret_cast<size_t>(src)&15) | (reinterpret_cast<size_t>(dst)&15) ) == 0;
          int64_t key = ( (nfft<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1;
          return m_plans[key];
      }

      inline PlanData & get_plan(int n0,int n1,bool inverse,void * dst,const void * src)
      {
          bool inplace = (dst==src);
          bool aligned = ( (reinterpret_cast<size_t>(src)&15) | (reinterpret_cast<size_t>(dst)&15) ) == 0;
          int64_t key = ( ( (((int64_t)n0) << 30)|(n1<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1 ) + 1;
          return m_plans[key];
      }
  };

} // end namespace internal

} // end namespace Eigen

/* vim: set filetype=cpp et sw=2 ts=2 ai: */
9,222
34.20229
120
h
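This backend is only used when FFTW is available; a sketch of how it is normally reached through the Eigen::FFT front-end, assuming FFTW headers and libraries are installed and linked (e.g. -lfftw3 -lfftw3f):

#define EIGEN_FFTW_DEFAULT           // ask Eigen::FFT to dispatch to the fftw_impl above
#include <unsupported/Eigen/FFT>
#include <complex>
#include <vector>

int main() {
  Eigen::FFT<float> fft;
  std::vector<float> signal(64, 1.0f);
  std::vector<std::complex<float> > spectrum;
  fft.fwd(spectrum, signal);         // real-to-complex forward transform
  std::vector<float> roundtrip;
  fft.inv(roundtrip, spectrum);      // back to the time domain
  return 0;
}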
abess
abess-master/python/include/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Mark Borgerding mark a borgerding net
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

namespace Eigen {

namespace internal {

  // This FFT implementation was derived from kissfft http:sourceforge.net/projects/kissfft
  // Copyright 2003-2009 Mark Borgerding

template <typename _Scalar>
struct kiss_cpx_fft
{
  typedef _Scalar Scalar;
  typedef std::complex<Scalar> Complex;
  std::vector<Complex> m_twiddles;
  std::vector<int> m_stageRadix;
  std::vector<int> m_stageRemainder;
  std::vector<Complex> m_scratchBuf;
  bool m_inverse;

  inline void make_twiddles(int nfft,bool inverse)
  {
    using std::acos;
    m_inverse = inverse;
    m_twiddles.resize(nfft);
    Scalar phinc = (inverse?2:-2)* acos( (Scalar) -1) / nfft;
    for (int i=0;i<nfft;++i)
      m_twiddles[i] = exp( Complex(0,i*phinc) );
  }

  void factorize(int nfft)
  {
    //start factoring out 4's, then 2's, then 3,5,7,9,...
    int n= nfft;
    int p=4;
    do {
      while (n % p) {
        switch (p) {
          case 4: p = 2; break;
          case 2: p = 3; break;
          default: p += 2; break;
        }
        if (p*p>n)
          p=n;// impossible to have a factor > sqrt(n)
      }
      n /= p;
      m_stageRadix.push_back(p);
      m_stageRemainder.push_back(n);
      if ( p > 5 )
        m_scratchBuf.resize(p); // scratchbuf will be needed in bfly_generic
    }while(n>1);
  }

  template <typename _Src>
  inline void work( int stage,Complex * xout, const _Src * xin, size_t fstride,size_t in_stride)
  {
    int p = m_stageRadix[stage];
    int m = m_stageRemainder[stage];
    Complex * Fout_beg = xout;
    Complex * Fout_end = xout + p*m;

    if (m>1) {
      do{
        // recursive call:
        // DFT of size m*p performed by doing
        // p instances of smaller DFTs of size m,
        // each one takes a decimated version of the input
        work(stage+1, xout , xin, fstride*p,in_stride);
        xin += fstride*in_stride;
      }while( (xout += m) != Fout_end );
    }else{
      do{
        *xout = *xin;
        xin += fstride*in_stride;
      }while(++xout != Fout_end );
    }
    xout=Fout_beg;

    // recombine the p smaller DFTs
    switch (p) {
      case 2: bfly2(xout,fstride,m); break;
      case 3: bfly3(xout,fstride,m); break;
      case 4: bfly4(xout,fstride,m); break;
      case 5: bfly5(xout,fstride,m); break;
      default: bfly_generic(xout,fstride,m,p); break;
    }
  }

  inline void bfly2( Complex * Fout, const size_t fstride, int m)
  {
    for (int k=0;k<m;++k) {
      Complex t = Fout[m+k] * m_twiddles[k*fstride];
      Fout[m+k] = Fout[k] - t;
      Fout[k] += t;
    }
  }

  inline void bfly4( Complex * Fout, const size_t fstride, const size_t m)
  {
    Complex scratch[6];
    int negative_if_inverse = m_inverse * -2 +1;
    for (size_t k=0;k<m;++k) {
      scratch[0] = Fout[k+m] * m_twiddles[k*fstride];
      scratch[1] = Fout[k+2*m] * m_twiddles[k*fstride*2];
      scratch[2] = Fout[k+3*m] * m_twiddles[k*fstride*3];
      scratch[5] = Fout[k] - scratch[1];

      Fout[k] += scratch[1];
      scratch[3] = scratch[0] + scratch[2];
      scratch[4] = scratch[0] - scratch[2];
      scratch[4] = Complex( scratch[4].imag()*negative_if_inverse , -scratch[4].real()* negative_if_inverse );

      Fout[k+2*m] = Fout[k] - scratch[3];
      Fout[k] += scratch[3];
      Fout[k+m] = scratch[5] + scratch[4];
      Fout[k+3*m] = scratch[5] - scratch[4];
    }
  }

  inline void bfly3( Complex * Fout, const size_t fstride, const size_t m)
  {
    size_t k=m;
    const size_t m2 = 2*m;
    Complex *tw1,*tw2;
    Complex scratch[5];
    Complex epi3;
    epi3 = m_twiddles[fstride*m];

    tw1=tw2=&m_twiddles[0];

    do{
      scratch[1]=Fout[m] * *tw1;
      scratch[2]=Fout[m2] * *tw2;

      scratch[3]=scratch[1]+scratch[2];
      scratch[0]=scratch[1]-scratch[2];
      tw1 += fstride;
      tw2 += fstride*2;
      Fout[m] = Complex( Fout->real() - Scalar(.5)*scratch[3].real() , Fout->imag() - Scalar(.5)*scratch[3].imag() );
      scratch[0] *= epi3.imag();
      *Fout += scratch[3];
      Fout[m2] = Complex( Fout[m].real() + scratch[0].imag() , Fout[m].imag() - scratch[0].real() );
      Fout[m] += Complex( -scratch[0].imag(),scratch[0].real() );
      ++Fout;
    }while(--k);
  }

  inline void bfly5( Complex * Fout, const size_t fstride, const size_t m)
  {
    Complex *Fout0,*Fout1,*Fout2,*Fout3,*Fout4;
    size_t u;
    Complex scratch[13];
    Complex * twiddles = &m_twiddles[0];
    Complex *tw;
    Complex ya,yb;
    ya = twiddles[fstride*m];
    yb = twiddles[fstride*2*m];

    Fout0=Fout;
    Fout1=Fout0+m;
    Fout2=Fout0+2*m;
    Fout3=Fout0+3*m;
    Fout4=Fout0+4*m;

    tw=twiddles;
    for ( u=0; u<m; ++u ) {
      scratch[0] = *Fout0;

      scratch[1] = *Fout1 * tw[u*fstride];
      scratch[2] = *Fout2 * tw[2*u*fstride];
      scratch[3] = *Fout3 * tw[3*u*fstride];
      scratch[4] = *Fout4 * tw[4*u*fstride];

      scratch[7] = scratch[1] + scratch[4];
      scratch[10] = scratch[1] - scratch[4];
      scratch[8] = scratch[2] + scratch[3];
      scratch[9] = scratch[2] - scratch[3];

      *Fout0 += scratch[7];
      *Fout0 += scratch[8];

      scratch[5] = scratch[0] + Complex(
          (scratch[7].real()*ya.real() ) + (scratch[8].real() *yb.real() ),
          (scratch[7].imag()*ya.real()) + (scratch[8].imag()*yb.real()) );

      scratch[6] = Complex(
          (scratch[10].imag()*ya.imag()) + (scratch[9].imag()*yb.imag()),
          -(scratch[10].real()*ya.imag()) - (scratch[9].real()*yb.imag()) );

      *Fout1 = scratch[5] - scratch[6];
      *Fout4 = scratch[5] + scratch[6];

      scratch[11] = scratch[0] + Complex(
          (scratch[7].real()*yb.real()) + (scratch[8].real()*ya.real()),
          (scratch[7].imag()*yb.real()) + (scratch[8].imag()*ya.real()) );

      scratch[12] = Complex(
          -(scratch[10].imag()*yb.imag()) + (scratch[9].imag()*ya.imag()),
          (scratch[10].real()*yb.imag()) - (scratch[9].real()*ya.imag()) );

      *Fout2=scratch[11]+scratch[12];
      *Fout3=scratch[11]-scratch[12];

      ++Fout0;++Fout1;++Fout2;++Fout3;++Fout4;
    }
  }

  /* perform the butterfly for one stage of a mixed radix FFT */
  inline void bfly_generic( Complex * Fout, const size_t fstride, int m, int p )
  {
    int u,k,q1,q;
    Complex * twiddles = &m_twiddles[0];
    Complex t;
    int Norig = static_cast<int>(m_twiddles.size());
    Complex * scratchbuf = &m_scratchBuf[0];

    for ( u=0; u<m; ++u ) {
      k=u;
      for ( q1=0 ; q1<p ; ++q1 ) {
        scratchbuf[q1] = Fout[ k ];
        k += m;
      }

      k=u;
      for ( q1=0 ; q1<p ; ++q1 ) {
        int twidx=0;
        Fout[ k ] = scratchbuf[0];
        for (q=1;q<p;++q ) {
          twidx += static_cast<int>(fstride) * k;
          if (twidx>=Norig) twidx-=Norig;
          t=scratchbuf[q] * twiddles[twidx];
          Fout[ k ] += t;
        }
        k += m;
      }
    }
  }
};

template <typename _Scalar>
struct kissfft_impl
{
  typedef _Scalar Scalar;
  typedef std::complex<Scalar> Complex;

  void clear()
  {
    m_plans.clear();
    m_realTwiddles.clear();
  }

  inline void fwd( Complex * dst,const Complex *src,int nfft)
  {
    get_plan(nfft,false).work(0, dst, src, 1,1);
  }

  inline void fwd2( Complex * dst,const Complex *src,int n0,int n1)
  {
    EIGEN_UNUSED_VARIABLE(dst);
    EIGEN_UNUSED_VARIABLE(src);
    EIGEN_UNUSED_VARIABLE(n0);
    EIGEN_UNUSED_VARIABLE(n1);
  }

  inline void inv2( Complex * dst,const Complex *src,int n0,int n1)
  {
    EIGEN_UNUSED_VARIABLE(dst);
    EIGEN_UNUSED_VARIABLE(src);
    EIGEN_UNUSED_VARIABLE(n0);
    EIGEN_UNUSED_VARIABLE(n1);
  }

  // real-to-complex forward FFT
  // perform two FFTs of src even and src odd
  // then twiddle to recombine them into the half-spectrum format
  // then fill in the conjugate symmetric half
  inline void fwd( Complex * dst,const Scalar * src,int nfft)
  {
    if ( nfft&3 ) {
      // use generic mode for odd
      m_tmpBuf1.resize(nfft);
      get_plan(nfft,false).work(0, &m_tmpBuf1[0], src, 1,1);
      std::copy(m_tmpBuf1.begin(),m_tmpBuf1.begin()+(nfft>>1)+1,dst );
    }else{
      int ncfft = nfft>>1;
      int ncfft2 = nfft>>2;
      Complex * rtw = real_twiddles(ncfft2);

      // use optimized mode for even real
      fwd( dst, reinterpret_cast<const Complex*> (src), ncfft);
      Complex dc = dst[0].real() + dst[0].imag();
      Complex nyquist = dst[0].real() - dst[0].imag();
      int k;
      for ( k=1;k <= ncfft2 ; ++k ) {
        Complex fpk = dst[k];
        Complex fpnk = conj(dst[ncfft-k]);
        Complex f1k = fpk + fpnk;
        Complex f2k = fpk - fpnk;
        Complex tw= f2k * rtw[k-1];
        dst[k] = (f1k + tw) * Scalar(.5);
        dst[ncfft-k] = conj(f1k -tw)*Scalar(.5);
      }
      dst[0] = dc;
      dst[ncfft] = nyquist;
    }
  }

  // inverse complex-to-complex
  inline void inv(Complex * dst,const Complex *src,int nfft)
  {
    get_plan(nfft,true).work(0, dst, src, 1,1);
  }

  // half-complex to scalar
  inline void inv( Scalar * dst,const Complex * src,int nfft)
  {
    if (nfft&3) {
      m_tmpBuf1.resize(nfft);
      m_tmpBuf2.resize(nfft);
      std::copy(src,src+(nfft>>1)+1,m_tmpBuf1.begin() );
      for (int k=1;k<(nfft>>1)+1;++k)
        m_tmpBuf1[nfft-k] = conj(m_tmpBuf1[k]);
      inv(&m_tmpBuf2[0],&m_tmpBuf1[0],nfft);
      for (int k=0;k<nfft;++k)
        dst[k] = m_tmpBuf2[k].real();
    }else{
      // optimized version for multiple of 4
      int ncfft = nfft>>1;
      int ncfft2 = nfft>>2;
      Complex * rtw = real_twiddles(ncfft2);
      m_tmpBuf1.resize(ncfft);
      m_tmpBuf1[0] = Complex( src[0].real() + src[ncfft].real(), src[0].real() - src[ncfft].real() );
      for (int k = 1; k <= ncfft / 2; ++k) {
        Complex fk = src[k];
        Complex fnkc = conj(src[ncfft-k]);
        Complex fek = fk + fnkc;
        Complex tmp = fk - fnkc;
        Complex fok = tmp * conj(rtw[k-1]);
        m_tmpBuf1[k] = fek + fok;
        m_tmpBuf1[ncfft-k] = conj(fek - fok);
      }
      get_plan(ncfft,true).work(0, reinterpret_cast<Complex*>(dst), &m_tmpBuf1[0], 1,1);
    }
  }

  protected:
  typedef kiss_cpx_fft<Scalar> PlanData;
  typedef std::map<int,PlanData> PlanMap;

  PlanMap m_plans;
  std::map<int, std::vector<Complex> > m_realTwiddles;
  std::vector<Complex> m_tmpBuf1;
  std::vector<Complex> m_tmpBuf2;

  inline int PlanKey(int nfft, bool isinverse) const { return (nfft<<1) | int(isinverse); }

  inline PlanData & get_plan(int nfft, bool inverse)
  {
    // TODO look for PlanKey(nfft, ! inverse) and conjugate the twiddles
    PlanData & pd = m_plans[ PlanKey(nfft,inverse) ];
    if ( pd.m_twiddles.size() == 0 ) {
      pd.make_twiddles(nfft,inverse);
      pd.factorize(nfft);
    }
    return pd;
  }

  inline Complex * real_twiddles(int ncfft2)
  {
    using std::acos;
    std::vector<Complex> & twidref = m_realTwiddles[ncfft2];// creates new if not there
    if ( (int)twidref.size() != ncfft2 ) {
      twidref.resize(ncfft2);
      int ncfft= ncfft2<<1;
      Scalar pi = acos( Scalar(-1) );
      for (int k=1;k<=ncfft2;++k)
        twidref[k-1] = exp( Complex(0,-pi * (Scalar(k) / ncfft + Scalar(.5)) ) );
    }
    return &twidref[0];
  }
};

} // end namespace internal

} // end namespace Eigen

/* vim: set filetype=cpp et sw=2 ts=2 ai: */
12,275
28.159145
119
h
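The kissfft-derived backend above is the default for Eigen::FFT (no external dependency); a brief hedged sketch of reaching it through the same front-end, here with complex-to-complex transforms:

#include <unsupported/Eigen/FFT>   // without EIGEN_FFTW_DEFAULT this selects the kissfft backend
#include <complex>
#include <vector>

int main() {
  Eigen::FFT<double> fft;
  std::vector<std::complex<double> > in(8, std::complex<double>(1.0, 0.0)), out;
  fft.fwd(out, in);   // complex-to-complex forward; out[0] == 8, remaining bins ~0
  fft.inv(in, out);   // inverse transform recovers the constant signal
  return 0;
}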
abess
abess-master/python/include/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <[email protected]>

/* NOTE The functions of this file have been adapted from the GMM++ library */

//========================================================================
//
// Copyright (C) 2002-2007 Yves Renard
//
// This file is a part of GETFEM++
//
// Getfem++ is free software; you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation; version 2.1 of the License.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
// USA.
//
//========================================================================

#include "../../../../Eigen/src/Core/util/NonMPL2.h"

#ifndef EIGEN_CONSTRAINEDCG_H
#define EIGEN_CONSTRAINEDCG_H

#include <Eigen/Core>

namespace Eigen {

namespace internal {

/** \ingroup IterativeSolvers_Module
  * Compute the pseudo inverse of the non-square matrix C such that
  * \f$ CINV = (C * C^T)^{-1} * C \f$ based on a conjugate gradient method.
  *
  * This function is internally used by constrained_cg.
  */
template <typename CMatrix, typename CINVMatrix>
void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
{
  // could be optimised: copy the row, precompute C * trans(C).
  typedef typename CMatrix::Scalar Scalar;
  typedef typename CMatrix::Index Index;
  // FIXME use sparse vectors ?
  typedef Matrix<Scalar,Dynamic,1> TmpVec;

  Index rows = C.rows(), cols = C.cols();

  TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows);
  Scalar rho, rho_1, alpha;
  d.setZero();

  typedef Triplet<double> T;
  std::vector<T> tripletList;

  for (Index i = 0; i < rows; ++i)
  {
    d[i] = 1.0;
    rho = 1.0;
    e.setZero();
    r = d;
    p = d;

    while (rho >= 1e-38)
    { /* conjugate gradient to compute e             */
      /* which is the i-th row of inv(C * trans(C))  */
      l = C.transpose() * p;
      q = C * l;
      alpha = rho / p.dot(q);
      e += alpha * p;
      r += -alpha * q;
      rho_1 = rho;
      rho = r.dot(r);
      p = (rho/rho_1) * p + r;
    }

    l = C.transpose() * e; // l is the i-th row of CINV
    // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse
    for (Index j=0; j<l.size(); ++j)
      if (l[j]<1e-15)
        tripletList.push_back(T(i,j,l(j)));

    d[i] = 0.0;
  }
  CINV.setFromTriplets(tripletList.begin(), tripletList.end());
}

/** \ingroup IterativeSolvers_Module
  * Constrained conjugate gradient
  *
  * Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the constraint \f$ Cx \le f \f$
  */
template<typename TMatrix, typename CMatrix,
         typename VectorX, typename VectorB, typename VectorF>
void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
                    const VectorB& b, const VectorF& f, IterationController &iter)
{
  using std::sqrt;
  typedef typename TMatrix::Scalar Scalar;
  typedef typename TMatrix::Index Index;
  typedef Matrix<Scalar,Dynamic,1> TmpVec;

  Scalar rho = 1.0, rho_1, lambda, gamma;
  Index xSize = x.size();
  TmpVec p(xSize), q(xSize), q2(xSize),
         r(xSize), old_z(xSize), z(xSize),
         memox(xSize);
  std::vector<bool> satured(C.rows());
  p.setZero();
  iter.setRhsNorm(sqrt(b.dot(b))); // gael vect_sp(PS, b, b)
  if (iter.rhsNorm() == 0.0) iter.setRhsNorm(1.0);

  SparseMatrix<Scalar,RowMajor> CINV(C.rows(), C.cols());
  pseudo_inverse(C, CINV);

  while(true)
  {
    // computation of residual
    old_z = z;
    memox = x;
    r = b;
    r += A * -x;
    z = r;
    bool transition = false;
    for (Index i = 0; i < C.rows(); ++i)
    {
      Scalar al = C.row(i).dot(x) - f.coeff(i);
      if (al >= -1.0E-15)
      {
        if (!satured[i])
        {
          satured[i] = true;
          transition = true;
        }
        Scalar bb = CINV.row(i).dot(z);
        if (bb > 0.0)
          // FIXME: we should allow that: z += -bb * C.row(i);
          for (typename CMatrix::InnerIterator it(C,i); it; ++it)
            z.coeffRef(it.index()) -= bb*it.value();
      }
      else
        satured[i] = false;
    }

    // descent direction
    rho_1 = rho;
    rho = r.dot(z);

    if (iter.finished(rho)) break;

    if (iter.noiseLevel() > 0 && transition) std::cerr << "CCG: transition\n";
    if (transition || iter.first()) gamma = 0.0;
    else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1);
    p = z + gamma*p;

    ++iter;
    // one-dimensional optimization
    q = A * p;
    lambda = rho / q.dot(p);
    for (Index i = 0; i < C.rows(); ++i)
    {
      if (!satured[i])
      {
        Scalar bb = C.row(i).dot(p) - f[i];
        if (bb > 0.0)
          lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb);
      }
    }
    x += lambda * p;
    memox -= x;
  }
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_CONSTRAINEDCG_H
5,379
27.315789
95
h
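A dense sanity check of the quantity pseudo_inverse() above approximates; this is not the sparse routine itself, just the identity it targets for a full-row-rank constraint matrix C:

#include <Eigen/Dense>
#include <iostream>

int main() {
  // For C with full row rank, CINV = (C*C^T)^{-1} * C, so CINV * C^T should be the identity.
  Eigen::MatrixXd C = Eigen::MatrixXd::Random(3, 8);          // 3 constraints on 8 unknowns
  Eigen::MatrixXd CINV = (C * C.transpose()).inverse() * C;   // dense counterpart of pseudo_inverse()
  std::cout << (CINV * C.transpose() - Eigen::MatrixXd::Identity(3, 3)).norm() << "\n";  // ~0
}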
abess
abess-master/python/include/unsupported/Eigen/src/IterativeSolvers/GMRES.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <[email protected]>
// Copyright (C) 2012, 2014 Kolja Brix <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GMRES_H
#define EIGEN_GMRES_H

namespace Eigen {

namespace internal {

/**
 * Generalized Minimal Residual Algorithm based on the
 * Arnoldi algorithm implemented with Householder reflections.
 *
 * Parameters:
 *  \param mat       matrix of linear system of equations
 *  \param Rhs       right hand side vector of linear system of equations
 *  \param x         on input: initial guess, on output: solution
 *  \param precond   preconditioner used
 *  \param iters     on input: maximum number of iterations to perform
 *                   on output: number of iterations performed
 *  \param restart   number of iterations for a restart
 *  \param tol_error on input: relative residual tolerance
 *                   on output: residuum achieved
 *
 * \sa IterativeMethods::bicgstab()
 *
 *
 * For references, please see:
 *
 * Saad, Y. and Schultz, M. H.
 * GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems.
 * SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869.
 *
 * Saad, Y.
 * Iterative Methods for Sparse Linear Systems.
 * Society for Industrial and Applied Mathematics, Philadelphia, 2003.
 *
 * Walker, H. F.
 * Implementations of the GMRES method.
 * Comput.Phys.Comm. 53, 1989, pp. 311 - 320.
 *
 * Walker, H. F.
 * Implementation of the GMRES Method using Householder Transformations.
 * SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163.
 *
 */
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond,
           Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) {

  using std::sqrt;
  using std::abs;

  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix < Scalar, Dynamic, 1 > VectorType;
  typedef Matrix < Scalar, Dynamic, Dynamic, ColMajor> FMatrixType;

  RealScalar tol = tol_error;
  const Index maxIters = iters;
  iters = 0;

  const Index m = mat.rows();

  // residual and preconditioned residual
  VectorType p0 = rhs - mat*x;
  VectorType r0 = precond.solve(p0);

  const RealScalar r0Norm = r0.norm();

  // is initial guess already good enough?
  if(r0Norm == 0)
  {
    tol_error = 0;
    return true;
  }

  // storage for Hessenberg matrix and Householder data
  FMatrixType H  = FMatrixType::Zero(m, restart + 1);
  VectorType w   = VectorType::Zero(restart + 1);
  VectorType tau = VectorType::Zero(restart + 1);

  // storage for Jacobi rotations
  std::vector < JacobiRotation < Scalar > > G(restart);

  // storage for temporaries
  VectorType t(m), v(m), workspace(m), x_new(m);

  // generate first Householder vector
  Ref<VectorType> H0_tail = H.col(0).tail(m - 1);
  RealScalar beta;
  r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta);
  w(0) = Scalar(beta);

  for (Index k = 1; k <= restart; ++k)
  {
    ++iters;

    v = VectorType::Unit(m, k - 1);

    // apply Householder reflections H_{1} ... H_{k-1} to v
    // TODO: use a HouseholderSequence
    for (Index i = k - 1; i >= 0; --i) {
      v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
    }

    // apply matrix M to v:  v = mat * v;
    t.noalias() = mat * v;
    v = precond.solve(t);

    // apply Householder reflections H_{k-1} ... H_{1} to v
    // TODO: use a HouseholderSequence
    for (Index i = 0; i < k; ++i) {
      v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
    }

    if (v.tail(m - k).norm() != 0.0)
    {
      if (k <= restart)
      {
        // generate new Householder vector
        Ref<VectorType> Hk_tail = H.col(k).tail(m - k - 1);
        v.tail(m - k).makeHouseholder(Hk_tail, tau.coeffRef(k), beta);

        // apply Householder reflection H_{k} to v
        v.tail(m - k).applyHouseholderOnTheLeft(Hk_tail, tau.coeffRef(k), workspace.data());
      }
    }

    if (k > 1)
    {
      for (Index i = 0; i < k - 1; ++i)
      {
        // apply old Givens rotations to v
        v.applyOnTheLeft(i, i + 1, G[i].adjoint());
      }
    }

    if (k<m && v(k) != (Scalar) 0)
    {
      // determine next Givens rotation
      G[k - 1].makeGivens(v(k - 1), v(k));

      // apply Givens rotation to v and w
      v.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
      w.applyOnTheLeft(k - 1, k, G[k - 1].adjoint());
    }

    // insert coefficients into upper matrix triangle
    H.col(k-1).head(k) = v.head(k);

    tol_error = abs(w(k)) / r0Norm;
    bool stop = (k==m || tol_error < tol || iters == maxIters);

    if (stop || k == restart)
    {
      // solve upper triangular system
      Ref<VectorType> y = w.head(k);
      H.topLeftCorner(k, k).template triangularView <Upper>().solveInPlace(y);

      // use Horner-like scheme to calculate solution vector
      x_new.setZero();
      for (Index i = k - 1; i >= 0; --i)
      {
        x_new(i) += y(i);
        // apply Householder reflection H_{i} to x_new
        x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
      }

      x += x_new;

      if(stop)
      {
        return true;
      }
      else
      {
        k=0;

        // reset data for restart
        p0.noalias() = rhs - mat*x;
        r0 = precond.solve(p0);

        // clear Hessenberg matrix and Householder data
        H.setZero();
        w.setZero();
        tau.setZero();

        // generate first Householder vector
        r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta);
        w(0) = Scalar(beta);
      }
    }
  }

  return false;

}

}

template< typename _MatrixType,
          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class GMRES;

namespace internal {

template< typename _MatrixType, typename _Preconditioner>
struct traits<GMRES<_MatrixType,_Preconditioner> >
{
  typedef _MatrixType MatrixType;
  typedef _Preconditioner Preconditioner;
};

}

/** \ingroup IterativeLinearSolvers_Module
  * \brief A GMRES solver for sparse square problems
  *
  * This class allows to solve for A.x = b sparse linear problems using a generalized minimal
  * residual method. The vectors x and b can be either dense or sparse.
  *
  * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
  *
  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
  * and NumTraits<Scalar>::epsilon() for the tolerance.
  *
  * This class can be used as the direct solver classes. Here is a typical usage example:
  * \code
  * int n = 10000;
  * VectorXd x(n), b(n);
  * SparseMatrix<double> A(n,n);
  * // fill A and b
  * GMRES<SparseMatrix<double> > solver(A);
  * x = solver.solve(b);
  * std::cout << "#iterations:     " << solver.iterations() << std::endl;
  * std::cout << "estimated error: " << solver.error()      << std::endl;
  * // update b, and solve again
  * x = solver.solve(b);
  * \endcode
  *
  * By default the iterations start with x=0 as an initial guess of the solution.
  * One can control the start using the solveWithGuess() method.
  *
  * GMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
  *
  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
  */
template< typename _MatrixType, typename _Preconditioner>
class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
{
  typedef IterativeSolverBase<GMRES> Base;
  using Base::matrix;
  using Base::m_error;
  using Base::m_iterations;
  using Base::m_info;
  using Base::m_isInitialized;

private:
  Index m_restart;

public:
  using Base::_solve_impl;
  typedef _MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef _Preconditioner Preconditioner;

public:

  /** Default constructor. */
  GMRES() : Base(), m_restart(30) {}

  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
    *
    * This constructor is a shortcut for the default constructor followed
    * by a call to compute().
    *
    * \warning this class stores a reference to the matrix A as well as some
    * precomputed values that depend on it. Therefore, if \a A is changed
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
    */
  template<typename MatrixDerived>
  explicit GMRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()), m_restart(30) {}

  ~GMRES() {}

  /** Get the number of iterations after that a restart is performed.
    */
  Index get_restart() { return m_restart; }

  /** Set the number of iterations after that a restart is performed.
    *  \param restart   number of iterations for a restart, default is 30.
    */
  void set_restart(const Index restart) { m_restart=restart; }

  /** \internal */
  template<typename Rhs,typename Dest>
  void _solve_with_guess_impl(const Rhs& b, Dest& x) const
  {
    bool failed = false;
    for(Index j=0; j<b.cols(); ++j)
    {
      m_iterations = Base::maxIterations();
      m_error = Base::m_tolerance;

      typename Dest::ColXpr xj(x,j);
      if(!internal::gmres(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_restart, m_error))
        failed = true;
    }
    m_info = failed ? NumericalIssue
          : m_error <= Base::m_tolerance ? Success
          : NoConvergence;
    m_isInitialized = true;
  }

  /** \internal */
  template<typename Rhs,typename Dest>
  void _solve_impl(const Rhs& b, MatrixBase<Dest> &x) const
  {
    x = b;
    if(x.squaredNorm() == 0) return; // Check Zero right hand side
    _solve_with_guess_impl(b,x.derived());
  }

protected:

};

} // end namespace Eigen

#endif // EIGEN_GMRES_H
10,442
29.357558
118
h
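A usage sketch for the solver in the record above, exercising its restart knob; the system here is a stand-in (identity matrix) purely for illustration, and the include path follows stock Eigen's unsupported module layout:

#include <iostream>
#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>   // provides Eigen::GMRES

int main() {
  const int n = 100;
  Eigen::SparseMatrix<double> A(n, n);
  A.setIdentity();                     // stand-in system; a real application would fill A
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::GMRES<Eigen::SparseMatrix<double> > solver;
  solver.set_restart(20);              // restart the Arnoldi process every 20 iterations
  solver.setTolerance(1e-10);
  solver.compute(A);
  Eigen::VectorXd x = solver.solve(b);
  std::cout << "#iterations: " << solver.iterations()
            << ", error: " << solver.error() << "\n";
}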