column            type            min     max
repo              stringlengths   1       152
file              stringlengths   14      221
code              stringlengths   501     25k
file_length       int64           501     25k
avg_line_length   float64         20      99.5
max_line_length   int64           21      134
extension_type    stringclasses   2 values
repo: XNNPACK
file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x16.c
code:
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x16( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); i += 16; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = 
_mm256_add_ps(vs1, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); o += 16; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
file_length: 7,459
avg_line_length: 37.25641
max_line_length: 125
extension_type: c
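Note: the kernel above computes tanh through an expm1-style identity: it flips x to z = -|x|, clamps z at the saturation cutoff, approximates em = exp(2z) - 1, and then forms em / (em + 2), restoring the sign at the very end. A minimal scalar reference of that identity, using libm's expm1f in place of the kernel's polynomial (the helper name is illustrative, not part of XNNPACK):

#include <math.h>

// Scalar reference for the identity used by the vector kernel:
//   tanh(x) = -expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign of x restored.
static float scalar_tanh_ref(float x) {
  const float z = -fabsf(x);            // z = -|x|, matches vabsx/vz above
  const float em = expm1f(2.0f * z);    // e^(2z) - 1, the "emo" quantity
  const float y = em / (em + 2.0f);     // equals -tanh(|x|)
  return copysignf(-y, x);              // restore the sign of x
}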
repo: XNNPACK
file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x24.c
code:
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x24( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); i += 24; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); vs0 = 
_mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); o += 24; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = 
_mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
file_length: 8,794
avg_line_length: 39.529954
max_line_length: 125
extension_type: c
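Note: the vabsx/vinvsignx pair in these kernels is a branch-free sign trick on the raw fp16 bits: OR with the sign mask forces every lane to -|x|, and XOR of that result with the original bits leaves the sign bit set exactly in the lanes whose result must be negated back at the end. A one-lane scalar sketch, assuming the mask value 0x8000 (the kernel loads it from params->avx_expm1minus_rr1_p3h2.sign_mask):

#include <stdint.h>

// One fp16 lane of the sign handling used by the kernels above.
// y_bits holds the fp16 bits of tanh(-|x|), as produced by the division.
static uint16_t restore_sign(uint16_t x_bits, uint16_t y_bits) {
  const uint16_t sign_mask = UINT16_C(0x8000);
  const uint16_t absx_bits = (uint16_t) (x_bits | sign_mask);  // bits of -|x|
  const uint16_t invsign   = (uint16_t) (x_bits ^ absx_bits);  // 0x8000 iff x was positive
  return (uint16_t) (y_bits ^ invsign);                        // fp16 bits of tanh(x)
}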
repo: XNNPACK
file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x32.c
code:
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x32( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); i += 32; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = 
_mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); o += 32; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = 
_mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
file_length: 10,129
avg_line_length: 41.384937
max_line_length: 125
extension_type: c
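Note: vs is produced without any float-to-int conversion. Adding vmagic_bias to z*log2e rounds the product to an integer n that lands in the low mantissa bits of vn; shifting those raw bits left by 23 moves n into the binary32 exponent field, so vs becomes 2^n, and subtracting the bias afterwards recovers n for the reduced argument. A self-contained scalar sketch of the trick; the bias 12583039.0f (1.5 * 2^23 plus the exponent bias 127) is an illustrative choice, not necessarily the value stored in params->avx_expm1minus_rr1_p3h2.magic_bias:

#include <stdint.h>
#include <string.h>

// Round x to the nearest integer n and return 2^n, using only an add,
// a bit shift, and bit reinterpretation, as in the vs computation above.
static float exp2_round_via_magic_bias(float x, float* rounded_n) {
  const float magic_bias = 12583039.0f;   // 1.5 * 2^23 + 127
  const float biased = x + magic_bias;    // ulp is 1.0 here, so x gets rounded
  uint32_t bits;
  memcpy(&bits, &biased, sizeof bits);    // low 9 bits now hold n + 127
  bits <<= 23;                            // move n + 127 into the exponent field
  float s;
  memcpy(&s, &bits, sizeof s);            // s == 2^n for n in a safe range
  *rounded_n = biased - magic_bias;       // recover n, like vn -= vmagic_bias
  return s;
}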
repo: XNNPACK
file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x40.c
code:
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x40( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); i += 40; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 
vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, 
vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); o += 40; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } 
} }
file_length: 11,464
avg_line_length: 42.927203
max_line_length: 125
extension_type: c
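Note: taken together, the p3h2 step is a degree-3 polynomial in the reduced argument t whose linear coefficient is hardwired to 2 (the vtwo term), and the reconstruction emo = p*(t*s) + (s - 1) folds the 2^n scale back in so that emo approximates expm1 of the clamped argument. The scalar model below mirrors that structure end to end; the cutoff, the 2/ln2 and ln2/2 constants, and the Taylor coefficients 2 and 4/3 are illustrative stand-ins for the tuned constants held in the params struct:

#include <math.h>

// End-to-end scalar model of the rr1-p3h2ts-div evaluation above.
// Constants are illustrative (Taylor, not XNNPACK's minimax values).
static float tanh_p3h2_model(float x) {
  const float sat_cutoff = -4.5f;          // roughly where fp16 tanh saturates
  const float log2e_x2 = 2.885390082f;     // 2 / ln(2)
  const float minus_ln2_over_2 = -0.3465735903f;
  const float c2 = 2.0f;                   // Taylor stand-ins for params c2, c3
  const float c3 = 1.3333333f;

  const float z = fmaxf(sat_cutoff, -fabsf(x));   // clamp -|x|
  // n = round(2z / ln2) and s = 2^n (the kernel uses the magic-bias trick).
  const float n = nearbyintf(z * log2e_x2);
  const float s = exp2f(n);
  const float t = n * minus_ln2_over_2 + z;       // half of the reduced argument
  float p = c3 * t + c2;
  p = p * t + 2.0f;                               // p = c3*t^2 + c2*t + 2
  const float emo = p * (t * s) + (s - 1.0f);     // ~ expm1(2z)
  const float epo = emo + 2.0f;
  const float y = emo / epo;                      // ~ -tanh(|x|)
  return copysignf(-y, x);
}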
repo: XNNPACK
file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x48.c
code:
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x48( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); i += 48; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = 
_mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const 
__m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); o += 48; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, 
vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
file_length: 12,799
avg_line_length: 44.229682
max_line_length: 125
extension_type: c
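Note: for all of these kernels, batch is a byte count (the asserts require it to be a multiple of sizeof(uint16_t), and the loop bounds compare against N * sizeof(uint16_t)), and the remainder path still issues a full 16-byte load even when fewer than 8 halves remain, which is what the XNN_OOB_READS annotation signals. A hypothetical call site, with the buffer names and the externally initialized params treated as assumptions:

#include <stddef.h>
#include <stdint.h>
#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>

// Hypothetical driver: 'tanh_params' must have been filled in by the matching
// XNNPACK init helper before this is called (not shown here).
void run_tanh_x48(const uint16_t* in_f16, uint16_t* out_f16, size_t num_elements,
                  const union xnn_f16_tanh_params* tanh_params) {
  xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x48(
      num_elements * sizeof(uint16_t),   // batch is in bytes, not elements
      in_f16, out_f16, tanh_params);
  // Because of XNN_OOB_READS, in_f16 must tolerate a read of up to 16 bytes
  // starting at the last partial group, even if fewer halves are valid there.
}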
repo: XNNPACK
file: XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x56.c
code:
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x56( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); i += 56; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = 
_mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); vz6 = _mm256_max_ps(vsat_cutoff, vz6); __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); const __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2), vz6); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = 
_mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc3, vt6), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vemo6 = _mm256_add_ps(_mm256_mul_ps(vp6, vts6), vsmo6); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m256 vy6 = _mm256_div_ps(vemo6, vepo6); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); o += 56; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = 
_mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
14,134
45.344262
125
c
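Note: the x16/x56/x64/x72/x80 variants in this directory differ only in how many 8-element rows they unroll; the per-element math is identical. The following is a hedged, purely illustrative scalar sketch (not XNNPACK code): it substitutes libm's expm1f for the kernels' degree-3 polynomial and omits the saturation clamp, but follows the same reconstruction.

#include <math.h>

// tanh(x) = sign(x) * (1 - e^{-2|x|}) / (1 + e^{-2|x|})
//         = -sign(x) * expm1(-2|x|) / (expm1(-2|x|) + 2)
// The vector kernels compute z = -|x|, approximate expm1(2z) with a
// polynomial, reconstruct the quotient, and restore the sign at the end.
static float tanh_scalar_reference(float x) {
  const float z = -fabsf(x);            // z <= 0, so expm1(2z) is in (-1, 0]
  const float emo = expm1f(2.0f * z);   // "emo": e^{2z} minus one
  const float epo = emo + 2.0f;         // "epo": e^{2z} plus one
  const float y = emo / epo;            // tanh(z) = tanh(-|x|), always <= 0
  return copysignf(-y, x);              // flip the sign back to that of x
}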
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x64.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x64( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); i += 64; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = 
_mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); vz6 = _mm256_max_ps(vsat_cutoff, vz6); __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias); vz7 = _mm256_max_ps(vsat_cutoff, vz7); __m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = 
_mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); const __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2), vz6); const __m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2), vz7); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc3, vt6), vc2); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc3, vt7), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vtwo); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vemo6 = _mm256_add_ps(_mm256_mul_ps(vp6, vts6), vsmo6); const __m256 vemo7 = _mm256_add_ps(_mm256_mul_ps(vp7, vts7), vsmo7); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m256 vy6 = _mm256_div_ps(vemo6, vepo6); __m256 vy7 = _mm256_div_ps(vemo7, vepo7); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); 
__m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); o += 64; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = 
_mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
15,469
46.308869
125
c
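Note: each kernel above builds the scale s = 2^n by left-shifting the bit pattern of the magic-biased n by 23, doing so separately for each 128-bit half because the plain F16C/AVX target has no 256-bit integer shift (hence the extractf128/slli_epi32/insertf128 sequences). A hypothetical scalar illustration of that trick, assuming the magic bias is chosen so the rounded integer lands in the low mantissa bits:

#include <stdint.h>
#include <string.h>

// Mirrors the paired _mm_slli_epi32(..., 23) calls: after adding the magic
// bias, round(z * log2e) sits in the low bits of n's float representation;
// shifting those bits into the exponent field yields 2^n directly.
static float scale_from_biased_n(float n_with_magic_bias) {
  uint32_t bits;
  memcpy(&bits, &n_with_magic_bias, sizeof(bits));
  bits <<= 23;                   // integer part -> exponent field
  float s;
  memcpy(&s, &bits, sizeof(s));  // s == 2^n for in-range inputs (assumption about the bias value)
  return s;
}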
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x72.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x72( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64)); i += 72; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); __m256 vz8 = _mm256_cvtph_ps(vabsx8); const __m128i vinvsignx8 
= _mm_xor_si128(vx8, vabsx8); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); vz6 = _mm256_max_ps(vsat_cutoff, vz6); __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias); vz7 = _mm256_max_ps(vsat_cutoff, vz7); __m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias); vz8 = _mm256_max_ps(vsat_cutoff, vz8); __m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1); __m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23))); vn8 = _mm256_sub_ps(vn8, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = 
_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); const __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2), vz6); const __m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2), vz7); const __m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2), vz8); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc3, vt6), vc2); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc3, vt7), vc2); __m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc3, vt8), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vtwo); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vtwo); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vts8 = _mm256_mul_ps(vt8, vs8); const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vemo6 = _mm256_add_ps(_mm256_mul_ps(vp6, vts6), vsmo6); const __m256 
vemo7 = _mm256_add_ps(_mm256_mul_ps(vp7, vts7), vsmo7); const __m256 vemo8 = _mm256_add_ps(_mm256_mul_ps(vp8, vts8), vsmo8); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m256 vy6 = _mm256_div_ps(vemo6, vepo6); __m256 vy7 = _mm256_div_ps(vemo7, vepo7); __m256 vy8 = _mm256_div_ps(vemo8, vepo8); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); __m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); vh8 = _mm_xor_si128(vh8, vinvsignx8); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); _mm_storeu_si128((__m128i*) (o + 64), vh8); o += 72; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i 
vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
16,804
47.151862
125
c
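Note: none of these kernels takes a true absolute value. OR-ing the fp16 sign bit into the input yields the bit pattern of -|x| (which is what gets widened to float), and vinvsignx = vx ^ vabsx records whether the final XOR must flip the sign of tanh(-|x|). A hypothetical bit-level sketch on raw half-precision patterns, assuming the sign_mask parameter holds 0x8000 per lane:

#include <stdint.h>

// y_neg_bits is the fp16 encoding of tanh(-|x|); the return value encodes tanh(x).
static uint16_t tanh_fp16_sign_fixup(uint16_t x_bits, uint16_t y_neg_bits) {
  const uint16_t negabs_bits = x_bits | UINT16_C(0x8000);  // bit pattern of -|x|
  const uint16_t invsign = x_bits ^ negabs_bits;           // 0x8000 if x is non-negative, 0 otherwise
  return y_neg_bits ^ invsign;                             // conditionally flips the sign
}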
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x8( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = 
_mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
4,685
34.233083
123
c
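Note: the remainder path shared by all variants (including the x8 kernel above) still loads and computes a full 8-lane vector, relying on the XNN_OOB_READS annotation to permit reading past the end of the input, and then stores only 4, 2 and/or 1 halfwords according to the bits of the leftover byte count. A hypothetical scalar sketch of just the store selection, with the already-computed results in v[8]:

static void store_tail(uint16_t* o, const uint16_t v[8], size_t batch_bytes) {
  const size_t n = batch_bytes / sizeof(uint16_t);  // 1..7 leftover elements
  size_t k = 0;                                     // next source lane (mirrors the in-register shifts)
  if (n & 4) { for (int j = 0; j < 4; j++) o[j] = v[k + j]; o += 4; k += 4; }
  if (n & 2) { for (int j = 0; j < 2; j++) o[j] = v[k + j]; o += 2; k += 2; }
  if (n & 1) { o[0] = v[k]; }
}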
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-div-x80.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_div_x80( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64)); const __m128i vx9 = _mm_loadu_si128((const __m128i*) (i + 72)); i += 80; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask); const __m128i vabsx9 = _mm_or_si128(vx9, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); 
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); __m256 vz8 = _mm256_cvtph_ps(vabsx8); const __m128i vinvsignx8 = _mm_xor_si128(vx8, vabsx8); __m256 vz9 = _mm256_cvtph_ps(vabsx9); const __m128i vinvsignx9 = _mm_xor_si128(vx9, vabsx9); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); vz6 = _mm256_max_ps(vsat_cutoff, vz6); __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias); vz7 = _mm256_max_ps(vsat_cutoff, vz7); __m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias); vz8 = _mm256_max_ps(vsat_cutoff, vz8); __m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias); vz9 = _mm256_max_ps(vsat_cutoff, vz9); __m256 vn9 = _mm256_add_ps(_mm256_mul_ps(vz9, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1); __m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23))); vn8 = _mm256_sub_ps(vn8, vmagic_bias); const __m128 vn9_hi = _mm256_extractf128_ps(vn9, 1); __m256 vs9 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23))); vn9 = _mm256_sub_ps(vn9, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const 
__m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23)); const __m128 vs9_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn9_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1); vs9 = _mm256_insertf128_ps(vs9, vs9_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); const __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2), vz6); const __m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2), vz7); const __m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2), vz8); const __m256 vt9 = _mm256_add_ps(_mm256_mul_ps(vn9, vminus_ln2), vz9); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc3, vt6), vc2); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc3, vt7), vc2); __m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc3, vt8), vc2); __m256 vp9 = _mm256_add_ps(_mm256_mul_ps(vc3, vt9), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vtwo); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vtwo); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vtwo); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); 
const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vts8 = _mm256_mul_ps(vt8, vs8); const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one); const __m256 vts9 = _mm256_mul_ps(vt9, vs9); const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vemo6 = _mm256_add_ps(_mm256_mul_ps(vp6, vts6), vsmo6); const __m256 vemo7 = _mm256_add_ps(_mm256_mul_ps(vp7, vts7), vsmo7); const __m256 vemo8 = _mm256_add_ps(_mm256_mul_ps(vp8, vts8), vsmo8); const __m256 vemo9 = _mm256_add_ps(_mm256_mul_ps(vp9, vts9), vsmo9); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo); const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m256 vy6 = _mm256_div_ps(vemo6, vepo6); __m256 vy7 = _mm256_div_ps(vemo7, vepo7); __m256 vy8 = _mm256_div_ps(vemo8, vepo8); __m256 vy9 = _mm256_div_ps(vemo9, vepo9); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); __m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT); __m128i vh9 = _mm256_cvtps_ph(vy9, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); vh8 = _mm_xor_si128(vh8, vinvsignx8); vh9 = _mm_xor_si128(vh9, vinvsignx9); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); _mm_storeu_si128((__m128i*) (o + 64), vh8); _mm_storeu_si128((__m128i*) (o + 72), vh9); o += 80; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * 
sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
18,139
47.894879
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x16( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); i += 16; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = 
_mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); o += 16; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { 
_mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
7,924
37.470874
125
c
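A scalar reference helps when reading these flattened kernels. The sketch below is illustrative only (the function name tanh_ref and the use of libm's expm1f/copysignf are assumptions, not part of this file); it states the identity the expm1minus kernels evaluate: with z = -|x|, tanh(x) carries the sign of x on expm1(2z) / (expm1(2z) + 2).

#include <math.h>

static float tanh_ref(float x) {
  const float z = -fabsf(x);            // mirrors vz, built from the fp16 bits of x | 0x8000
  const float emo = expm1f(2.0f * z);   // e^(2z) - 1, the kernels' vemo
  const float epo = emo + 2.0f;         // e^(2z) + 1, the kernels' vepo
  const float y = emo / epo;            // tanh(-|x|), in [-1, 0]
  return copysignf(-y, x);              // restore the sign of x
}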
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x24( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); i += 24; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = 
_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); o += 24; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); 
_mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
9,377
39.773913
125
c
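All of these variants handle the sign on the raw binary16 bit patterns rather than in float arithmetic, assuming the sign_mask parameter is the per-lane 0x8000 sign bit: OR-ing it in yields -|x| (so the exponential argument is never positive), and x ^ (x | 0x8000) is 0x8000 exactly when x was non-negative, i.e. the mask that must be XOR-ed into the half-precision result to turn tanh(-|x|) back into tanh(x). A scalar bit-level sketch (the helper names are made up for illustration):

#include <stdint.h>

static inline uint16_t f16_force_negative(uint16_t h) {
  return (uint16_t) (h | UINT16_C(0x8000));       // mirrors _mm_or_si128(vx, vsign_mask)
}

static inline uint16_t f16_sign_restore_mask(uint16_t h) {
  // 0x8000 if h was non-negative, 0x0000 otherwise.
  return (uint16_t) (h ^ f16_force_negative(h));  // mirrors _mm_xor_si128(vx, vabsx)
}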
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x32( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); i += 32; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = 
_mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); o += 
32; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
10,830
41.641732
125
c
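These _rcp_ kernels replace an exact division by _mm256_rcp_ps, the roughly 12-bit reciprocal approximation, followed by a single multiply and no Newton-Raphson refinement; presumably that is acceptable here because the result is immediately rounded to half precision, whose significand holds only 11 bits. For contrast, one refinement step would look like the sketch below (an assumption shown for comparison, not code from these files):

#include <immintrin.h>

// Hypothetical helper: divide vemo by vepo using rcp plus one
// Newton-Raphson step, r' = r * (2 - r * d).  The kernels above omit
// the refinement and use the raw rcp result directly.
static inline __m256 div_rcp_nr1(__m256 vemo, __m256 vepo) {
  __m256 vrepo = _mm256_rcp_ps(vepo);
  vrepo = _mm256_mul_ps(vrepo, _mm256_sub_ps(_mm256_set1_ps(2.0f),
                                             _mm256_mul_ps(vrepo, vepo)));
  return _mm256_mul_ps(vemo, vrepo);
}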
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x40.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x40( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); i += 40; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = 
_mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 
vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); o += 40; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, 
vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
12,283
43.18705
125
c
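The split/shift/reinsert dance around vn exists because these kernels target F16C plus AVX without AVX2, so there is no 256-bit integer shift: each 128-bit half of vn is reinterpreted as integers and shifted left by 23 to drop the biased exponent into the float exponent field, producing s = 2^n. A scalar sketch of the same trick (illustrative; it assumes vn is taken before the _mm256_sub_ps(vn, vmagic_bias) step, so its low bits still hold the biased exponent n + 127):

#include <stdint.h>
#include <string.h>

static inline float scale_from_biased_n(float vn_with_magic_bias) {
  uint32_t bits;
  memcpy(&bits, &vn_with_magic_bias, sizeof bits);  // reinterpret, like _mm_castps_si128
  bits <<= 23;                                      // like _mm_slli_epi32(..., 23)
  float s;
  memcpy(&s, &bits, sizeof s);                      // back to float, like _mm_castsi128_ps
  return s;                                         // == 2^n when the low bits hold n + 127
}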
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x48.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x48( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); i += 48; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 
vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 
vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); o += 48; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = 
_mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
13,736
44.486755
125
c
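The sub-8 tail at the end of every variant computes a full 8-element vector (the over-read is what XNN_OOB_READS declares) and then stores only the live elements, peeling 4, 2 and 1 results off the low end of the register. The store sequence, reproduced from the code above with comments added:

if (batch & (4 * sizeof(uint16_t))) {
  _mm_storel_epi64((__m128i*) o, vh);        // write results 0..3
  vh = _mm_unpackhi_epi64(vh, vh);           // move results 4..7 into the low half
  o += 4;
}
if (batch & (2 * sizeof(uint16_t))) {
  _mm_storeu_si32(o, vh);                    // write the next two results
  vh = _mm_srli_epi64(vh, 32);               // shift the following pair down
  o += 2;
}
if (batch & (1 * sizeof(uint16_t))) {
  *o = (uint16_t) _mm_extract_epi16(vh, 0);  // write the last result
}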
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x56.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x56( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); i += 56; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), 
vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS); __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); const __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2), vz6); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, 
vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc3, vt6), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vemo6 = _mm256_add_ps(_mm256_mul_ps(vp6, vts6), vsmo6); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vrepo6 = _mm256_rcp_ps(vepo6); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); __m256 vy6 = _mm256_mul_ps(vemo6, vrepo6); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = 
_mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); o += 56; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
15,189
45.595092
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x64.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x64( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); i += 64; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, 
vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS); __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias); const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS); __m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 
1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); const __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2), vz6); const __m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2), vz7); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc3, vt6), vc2); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc3, vt7), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vtwo); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vemo6 = _mm256_add_ps(_mm256_mul_ps(vp6, vts6), vsmo6); const __m256 vemo7 = _mm256_add_ps(_mm256_mul_ps(vp7, vts7), vsmo7); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vrepo6 = _mm256_rcp_ps(vepo6); __m256 vrepo7 = 
_mm256_rcp_ps(vepo7); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); __m256 vy6 = _mm256_mul_ps(vemo6, vrepo6); __m256 vy7 = _mm256_mul_ps(vemo7, vrepo7); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6); vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); o += 64; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); 
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
16,642
46.551429
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x72.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x72( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64)); i += 72; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); __m256 vz8 = _mm256_cvtph_ps(vabsx8); const __m128i vinvsignx8 
= _mm_xor_si128(vx8, vabsx8); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS); __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias); const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS); __m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias); const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS); __m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1); __m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23))); vn8 = _mm256_sub_ps(vn8, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = 
_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); const __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2), vz6); const __m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2), vz7); const __m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2), vz8); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc3, vt6), vc2); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc3, vt7), vc2); __m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc3, vt8), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vtwo); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vtwo); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vts8 = _mm256_mul_ps(vt8, vs8); const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = 
_mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vemo6 = _mm256_add_ps(_mm256_mul_ps(vp6, vts6), vsmo6); const __m256 vemo7 = _mm256_add_ps(_mm256_mul_ps(vp7, vts7), vsmo7); const __m256 vemo8 = _mm256_add_ps(_mm256_mul_ps(vp8, vts8), vsmo8); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vrepo6 = _mm256_rcp_ps(vepo6); __m256 vrepo7 = _mm256_rcp_ps(vepo7); __m256 vrepo8 = _mm256_rcp_ps(vepo8); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); __m256 vy6 = _mm256_mul_ps(vemo6, vrepo6); __m256 vy7 = _mm256_mul_ps(vemo7, vrepo7); __m256 vy8 = _mm256_mul_ps(vemo8, vrepo8); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6); vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7); vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); __m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); vh8 = _mm_xor_si128(vh8, vinvsignx8); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); _mm_storeu_si128((__m128i*) (o + 64), vh8); o += 72; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, 
vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
18,095
47.385027
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x8( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = 
_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
4,913
34.352518
123
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-expm1minus-rr1-p3h2ts-rcp-x80.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_expm1minus_rr1_p3h2ts_rcp_x80( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64)); const __m128i vx9 = _mm_loadu_si128((const __m128i*) (i + 72)); i += 80; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask); const __m128i vabsx9 = _mm_or_si128(vx9, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); 
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); __m256 vz8 = _mm256_cvtph_ps(vabsx8); const __m128i vinvsignx8 = _mm_xor_si128(vx8, vabsx8); __m256 vz9 = _mm256_cvtph_ps(vabsx9); const __m128i vinvsignx9 = _mm_xor_si128(vx9, vabsx9); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias); const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS); __m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias); const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS); __m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias); const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS); __m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias); const __m256 vm9 = _mm256_cmp_ps(vz9, vsat_cutoff, _CMP_LE_OS); __m256 vn9 = _mm256_add_ps(_mm256_mul_ps(vz9, vlog2e), vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1); __m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23))); vn8 = _mm256_sub_ps(vn8, vmagic_bias); const __m128 vn9_hi = _mm256_extractf128_ps(vn9, 1); 
__m256 vs9 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23))); vn9 = _mm256_sub_ps(vn9, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23)); const __m128 vs9_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn9_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1); vs9 = _mm256_insertf128_ps(vs9, vs9_hi, 1); const __m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2), vz0); const __m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2), vz1); const __m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2), vz2); const __m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2), vz3); const __m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2), vz4); const __m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2), vz5); const __m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2), vz6); const __m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2), vz7); const __m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2), vz8); const __m256 vt9 = _mm256_add_ps(_mm256_mul_ps(vn9, vminus_ln2), vz9); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc3, vt0), vc2); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc3, vt1), vc2); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc3, vt2), vc2); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc3, vt3), vc2); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc3, vt4), vc2); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc3, vt5), vc2); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc3, vt6), vc2); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc3, vt7), vc2); __m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc3, vt8), vc2); __m256 vp9 = _mm256_add_ps(_mm256_mul_ps(vc3, vt9), vc2); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vtwo); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vtwo); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vtwo); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vtwo); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vtwo); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vtwo); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vtwo); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vtwo); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vtwo); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, 
vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vts8 = _mm256_mul_ps(vt8, vs8); const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one); const __m256 vts9 = _mm256_mul_ps(vt9, vs9); const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one); const __m256 vemo0 = _mm256_add_ps(_mm256_mul_ps(vp0, vts0), vsmo0); const __m256 vemo1 = _mm256_add_ps(_mm256_mul_ps(vp1, vts1), vsmo1); const __m256 vemo2 = _mm256_add_ps(_mm256_mul_ps(vp2, vts2), vsmo2); const __m256 vemo3 = _mm256_add_ps(_mm256_mul_ps(vp3, vts3), vsmo3); const __m256 vemo4 = _mm256_add_ps(_mm256_mul_ps(vp4, vts4), vsmo4); const __m256 vemo5 = _mm256_add_ps(_mm256_mul_ps(vp5, vts5), vsmo5); const __m256 vemo6 = _mm256_add_ps(_mm256_mul_ps(vp6, vts6), vsmo6); const __m256 vemo7 = _mm256_add_ps(_mm256_mul_ps(vp7, vts7), vsmo7); const __m256 vemo8 = _mm256_add_ps(_mm256_mul_ps(vp8, vts8), vsmo8); const __m256 vemo9 = _mm256_add_ps(_mm256_mul_ps(vp9, vts9), vsmo9); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo); const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vrepo6 = _mm256_rcp_ps(vepo6); __m256 vrepo7 = _mm256_rcp_ps(vepo7); __m256 vrepo8 = _mm256_rcp_ps(vepo8); __m256 vrepo9 = _mm256_rcp_ps(vepo9); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); __m256 vy6 = _mm256_mul_ps(vemo6, vrepo6); __m256 vy7 = _mm256_mul_ps(vemo7, vrepo7); __m256 vy8 = _mm256_mul_ps(vemo8, vrepo8); __m256 vy9 = _mm256_mul_ps(vemo9, vrepo9); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6); vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7); vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8); vy9 = _mm256_blendv_ps(vy9, vminus_one, vm9); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = 
_mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); __m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT); __m128i vh9 = _mm256_cvtps_ph(vy9, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); vh8 = _mm_xor_si128(vh8, vinvsignx8); vh9 = _mm_xor_si128(vh9, vinvsignx9); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); _mm_storeu_si128((__m128i*) (o + 64), vh8); _mm_storeu_si128((__m128i*) (o + 72), vh9); o += 80; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2), vz); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc3, vt), vc2); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_add_ps(_mm256_mul_ps(vp, vts), vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = 
_mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
19,548
48.11809
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x16( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); i += 16; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); o += 16; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = 
_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
5,777
37.52
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x24( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); i += 24; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc19, vt2), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), 
vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); const __m256 vy2 = _mm256_add_ps(_mm256_mul_ps(vp2, vxt2), vx2); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); o += 24; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
6,659
39.363636
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x32( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); i += 32; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc19, vt2), vc17); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc19, vt3), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc15); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc13); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc11); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = 
_mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc9); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc7); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc5); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); const __m256 vy2 = _mm256_add_ps(_mm256_mul_ps(vp2, vxt2), vx2); const __m256 vy3 = _mm256_add_ps(_mm256_mul_ps(vp3, vxt3), vx3); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); o += 32; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
7,541
40.9
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x40.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x40( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); i += 40; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc19, vt2), vc17); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc19, vt3), vc17); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc19, vt4), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc15); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc15); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc13); vp3 = 
_mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc13); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc11); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc11); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc9); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc9); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc7); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc7); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc5); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc5); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); const __m256 vy2 = _mm256_add_ps(_mm256_mul_ps(vp2, vxt2), vx2); const __m256 vy3 = _mm256_add_ps(_mm256_mul_ps(vp3, vxt3), vx3); const __m256 vy4 = _mm256_add_ps(_mm256_mul_ps(vp4, vxt4), vx4); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); o += 40; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = 
_mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
8,423
42.2
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x48.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x48( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); i += 48; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc19, vt2), vc17); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc19, vt3), vc17); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc19, vt4), vc17); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc19, vt5), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc15); vp3 = 
_mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc15); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc15); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc13); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc13); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc13); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc11); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc11); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc11); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc9); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc9); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc9); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc7); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc7); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc7); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc5); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc5); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc5); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); const __m256 vy2 = _mm256_add_ps(_mm256_mul_ps(vp2, vxt2), vx2); const __m256 vy3 = _mm256_add_ps(_mm256_mul_ps(vp3, vxt3), vx3); const __m256 vy4 = _mm256_add_ps(_mm256_mul_ps(vp4, vxt4), vx4); const __m256 vy5 = _mm256_add_ps(_mm256_mul_ps(vp5, vxt5), vx5); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); o += 48; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, 
vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
9,305
43.314286
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x56.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x56( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48))); i += 56; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx6 = _mm256_max_ps(vneg_sat_cutoff, vx6); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); vx6 = _mm256_min_ps(vpos_sat_cutoff, vx6); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); const __m256 vt6 = _mm256_mul_ps(vx6, vx6); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc19, vt2), vc17); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc19, vt3), vc17); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc19, vt4), vc17); __m256 vp5 = 
_mm256_add_ps(_mm256_mul_ps(vc19, vt5), vc17); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc19, vt6), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc15); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc15); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc15); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc15); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc13); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc13); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc13); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc13); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc11); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc11); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc11); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc11); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc9); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc9); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc9); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc9); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc7); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc7); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc7); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc7); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc5); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc5); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc5); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc5); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vxt6 = _mm256_mul_ps(vx6, vt6); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); const __m256 vy2 = _mm256_add_ps(_mm256_mul_ps(vp2, vxt2), vx2); const __m256 vy3 = _mm256_add_ps(_mm256_mul_ps(vp3, vxt3), vx3); const __m256 vy4 = _mm256_add_ps(_mm256_mul_ps(vp4, vxt4), vx4); const __m256 vy5 = _mm256_add_ps(_mm256_mul_ps(vp5, vxt5), vx5); const __m256 vy6 = _mm256_add_ps(_mm256_mul_ps(vp6, vxt6), vx6); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, 
_MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT)); o += 56; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
10,187
44.28
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x64.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x64( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48))); __m256 vx7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56))); i += 64; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx6 = _mm256_max_ps(vneg_sat_cutoff, vx6); vx7 = _mm256_max_ps(vneg_sat_cutoff, vx7); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); vx6 = _mm256_min_ps(vpos_sat_cutoff, vx6); vx7 = _mm256_min_ps(vpos_sat_cutoff, vx7); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); const __m256 vt6 = _mm256_mul_ps(vx6, vx6); const __m256 vt7 = _mm256_mul_ps(vx7, vx7); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); __m256 vp2 = 
_mm256_add_ps(_mm256_mul_ps(vc19, vt2), vc17); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc19, vt3), vc17); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc19, vt4), vc17); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc19, vt5), vc17); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc19, vt6), vc17); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc19, vt7), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc15); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc15); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc15); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc15); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc15); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc13); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc13); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc13); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc13); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc13); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc11); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc11); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc11); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc11); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc11); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc9); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc9); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc9); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc9); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc9); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc7); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc7); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc7); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc7); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc7); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc5); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc5); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc5); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc5); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc5); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vxt6 = _mm256_mul_ps(vx6, vt6); const __m256 
vxt7 = _mm256_mul_ps(vx7, vt7); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); const __m256 vy2 = _mm256_add_ps(_mm256_mul_ps(vp2, vxt2), vx2); const __m256 vy3 = _mm256_add_ps(_mm256_mul_ps(vp3, vxt3), vx3); const __m256 vy4 = _mm256_add_ps(_mm256_mul_ps(vp4, vxt4), vx4); const __m256 vy5 = _mm256_add_ps(_mm256_mul_ps(vp5, vxt5), vx5); const __m256 vy6 = _mm256_add_ps(_mm256_mul_ps(vp6, vxt6), vx6); const __m256 vy7 = _mm256_add_ps(_mm256_mul_ps(vp7, vxt7), vx7); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT)); o += 64; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
11,069
45.125
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x72.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x72( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48))); __m256 vx7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56))); __m256 vx8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64))); i += 72; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx6 = _mm256_max_ps(vneg_sat_cutoff, vx6); vx7 = _mm256_max_ps(vneg_sat_cutoff, vx7); vx8 = _mm256_max_ps(vneg_sat_cutoff, vx8); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); vx6 = _mm256_min_ps(vpos_sat_cutoff, vx6); vx7 = _mm256_min_ps(vpos_sat_cutoff, vx7); vx8 = _mm256_min_ps(vpos_sat_cutoff, vx8); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); const __m256 vt6 = _mm256_mul_ps(vx6, vx6); const __m256 vt7 = 
_mm256_mul_ps(vx7, vx7); const __m256 vt8 = _mm256_mul_ps(vx8, vx8); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc19, vt2), vc17); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc19, vt3), vc17); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc19, vt4), vc17); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc19, vt5), vc17); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc19, vt6), vc17); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc19, vt7), vc17); __m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc19, vt8), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc15); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc15); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc15); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc15); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc15); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc15); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc13); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc13); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc13); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc13); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc13); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc13); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc11); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc11); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc11); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc11); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc11); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc11); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc9); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc9); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc9); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc9); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc9); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc9); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc7); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc7); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc7); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc7); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc7); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc7); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc5); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc5); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc5); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc5); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc5); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc5); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3); vp3 = 
_mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vxt6 = _mm256_mul_ps(vx6, vt6); const __m256 vxt7 = _mm256_mul_ps(vx7, vt7); const __m256 vxt8 = _mm256_mul_ps(vx8, vt8); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); const __m256 vy2 = _mm256_add_ps(_mm256_mul_ps(vp2, vxt2), vx2); const __m256 vy3 = _mm256_add_ps(_mm256_mul_ps(vp3, vxt3), vx3); const __m256 vy4 = _mm256_add_ps(_mm256_mul_ps(vp4, vxt4), vx4); const __m256 vy5 = _mm256_add_ps(_mm256_mul_ps(vp5, vxt5), vx5); const __m256 vy6 = _mm256_add_ps(_mm256_mul_ps(vp6, vxt6), vx6); const __m256 vy7 = _mm256_add_ps(_mm256_mul_ps(vp7, vxt7), vx7); const __m256 vy8 = _mm256_add_ps(_mm256_mul_ps(vp8, vxt8), vx8); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT)); o += 72; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const 
__m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
11,951
45.870588
95
c
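The f16c polynomial_p19h9t2 kernels in this group (x72 above, x8 and x80 below) all evaluate the same saturated odd-polynomial approximation: clamp x to [neg_sat_cutoff, pos_sat_cutoff], then compute tanh(x) ~= x + (x*x^2)*P(x^2), where P is a degree-8 polynomial in x^2 carrying the x^3..x^19 terms. A minimal scalar sketch of that evaluation order follows; the coefficients and cutoffs are taken as caller-supplied parameters here because the exact minimax constants live in the params struct and are not shown in this file.

// Scalar sketch of the p19h9t2 scheme used by the vector kernels above (assumptions:
// c[0..8] = {c19, c17, c15, c13, c11, c9, c7, c5, c3} and the cutoffs must be the same
// constants the xnn_f16_tanh_params struct holds; they are not reproduced here).
#include <stddef.h>

static float tanh_p19h9t2_ref(float x, const float c[9], float neg_cutoff, float pos_cutoff) {
  // Clamp to the range where the polynomial is accurate; outside it tanh rounds to +/-1 in fp16.
  if (x < neg_cutoff) { x = neg_cutoff; }
  if (x > pos_cutoff) { x = pos_cutoff; }
  const float t = x * x;                 // t = x^2
  float p = c[0];                        // c19
  for (size_t k = 1; k < 9; k++) {       // Horner recurrence over c17 ... c3
    p = p * t + c[k];
  }
  const float xt = x * t;                // x^3
  return p * xt + x;                     // x + x^3 * P(x^2)
}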
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x8( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; 
} if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
3,918
34.306306
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-f16c-polynomial-p19h9t2-x80.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__f16c_polynomial_p19h9t2_x80( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48))); __m256 vx7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56))); __m256 vx8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64))); __m256 vx9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72))); i += 80; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx6 = _mm256_max_ps(vneg_sat_cutoff, vx6); vx7 = _mm256_max_ps(vneg_sat_cutoff, vx7); vx8 = _mm256_max_ps(vneg_sat_cutoff, vx8); vx9 = _mm256_max_ps(vneg_sat_cutoff, vx9); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); vx6 = _mm256_min_ps(vpos_sat_cutoff, vx6); vx7 = _mm256_min_ps(vpos_sat_cutoff, vx7); vx8 = _mm256_min_ps(vpos_sat_cutoff, vx8); vx9 = _mm256_min_ps(vpos_sat_cutoff, vx9); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = 
_mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); const __m256 vt6 = _mm256_mul_ps(vx6, vx6); const __m256 vt7 = _mm256_mul_ps(vx7, vx7); const __m256 vt8 = _mm256_mul_ps(vx8, vx8); const __m256 vt9 = _mm256_mul_ps(vx9, vx9); __m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc19, vt0), vc17); __m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc19, vt1), vc17); __m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc19, vt2), vc17); __m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc19, vt3), vc17); __m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc19, vt4), vc17); __m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc19, vt5), vc17); __m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc19, vt6), vc17); __m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc19, vt7), vc17); __m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc19, vt8), vc17); __m256 vp9 = _mm256_add_ps(_mm256_mul_ps(vc19, vt9), vc17); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc15); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc15); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc15); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc15); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc15); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc15); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc15); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc15); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc15); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc15); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc13); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc13); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc13); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc13); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc13); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc13); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc13); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc13); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc13); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc13); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc11); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc11); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc11); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc11); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc11); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc11); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc11); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc11); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc11); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc11); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc9); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc9); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc9); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc9); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc9); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc9); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc9); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc9); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc9); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc9); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc7); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc7); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc7); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc7); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc7); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc7); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc7); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc7); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc7); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc7); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc5); vp1 = 
_mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc5); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc5); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc5); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc5); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc5); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc5); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc5); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc5); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc5); vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3); vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3); vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3); vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3); vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3); vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3); vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3); vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3); vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc3); vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vxt6 = _mm256_mul_ps(vx6, vt6); const __m256 vxt7 = _mm256_mul_ps(vx7, vt7); const __m256 vxt8 = _mm256_mul_ps(vx8, vt8); const __m256 vxt9 = _mm256_mul_ps(vx9, vt9); const __m256 vy0 = _mm256_add_ps(_mm256_mul_ps(vp0, vxt0), vx0); const __m256 vy1 = _mm256_add_ps(_mm256_mul_ps(vp1, vxt1), vx1); const __m256 vy2 = _mm256_add_ps(_mm256_mul_ps(vp2, vxt2), vx2); const __m256 vy3 = _mm256_add_ps(_mm256_mul_ps(vp3, vxt3), vx3); const __m256 vy4 = _mm256_add_ps(_mm256_mul_ps(vp4, vxt4), vx4); const __m256 vy5 = _mm256_add_ps(_mm256_mul_ps(vp5, vxt5), vx5); const __m256 vy6 = _mm256_add_ps(_mm256_mul_ps(vp6, vxt6), vx6); const __m256 vy7 = _mm256_add_ps(_mm256_mul_ps(vp7, vxt7), vx7); const __m256 vy8 = _mm256_add_ps(_mm256_mul_ps(vp8, vxt8), vx8); const __m256 vy9 = _mm256_add_ps(_mm256_mul_ps(vp9, vxt9), vx9); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vy9, _MM_FROUND_TO_NEAREST_INT)); o += 80; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, 
vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = _mm256_add_ps(_mm256_mul_ps(vc19, vt), vc17); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc15); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc13); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc11); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc9); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc7); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc5); vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_add_ps(_mm256_mul_ps(vp, vxt), vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
12,833
46.533333
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x16( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); i += 16; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); __m256 vp0 = vc3; __m256 vp1 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const 
__m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); o += 16; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
7,255
35.462312
125
c
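The fma3 expm1minus_rr1_p3h2ts_div kernels in this group (x16 above, wider unrolls below) use a different scheme: they form z = -|x|, saturate it, compute emo ~= e^{2z} - 1 through a range-reduced degree-3 polynomial and an exponent-bit reconstruction of 2^n, then take tanh as emo / (emo + 2) with the input sign restored by an XOR on the fp16 bits. A libm-based scalar sketch of the same identity (without the range reduction or the bit tricks) is shown below; the saturation cutoff value is an illustrative assumption, not the constant from the params struct.

// Scalar reference for the identity behind these kernels:
//   tanh(x) = sign(x) * (1 - e^{-2|x|}) / (1 + e^{-2|x|})
#include <math.h>

static float tanh_expm1minus_ref(float x) {
  float z = -fabsf(x);                  // z <= 0
  if (z < -4.5f) { z = -4.5f; }         // assumed cutoff: tanh saturates to +/-1 in fp16 beyond here
  const float emo = expm1f(2.0f * z);   // e^{2z} - 1, in (-1, 0]
  const float epo = emo + 2.0f;         // e^{2z} + 1
  const float y = emo / epo;            // equals -tanh(|x|)
  return copysignf(-y, x);              // restore the sign of the input
}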
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x24( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); i += 24; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = 
_mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); o += 24; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, 
vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
8,540
37.472973
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x32( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); i += 32; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); 
const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); o += 32; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = 
_mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
9,825
39.106122
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x40.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x40( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); i += 40; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = 
_mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, 
_MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); o += 40; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
11110
40.458955
125
c
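All of the tanh kernels in this family evaluate the identity tanh(u) = expm1(2u) / (expm1(2u) + 2) at the non-positive argument u = -|x| and then restore the sign of x; the kernels additionally clamp z = -|x| at a saturation cutoff so the polynomial never sees arguments where tanh has already saturated. A minimal scalar sketch of that reconstruction, using libm instead of the vectorized polynomial (the function name and structure here are illustrative and not part of XNNPACK):

#include <math.h>

/* Reference formulation behind the kernels above: tanh(x) from expm1 at the
   non-positive argument u = -|x|, followed by sign restoration. */
static float tanh_expm1minus_reference(float x) {
  const float z = -fabsf(x);           /* z in (-inf, 0] */
  const float emo = expm1f(2.0f * z);  /* e^(2z) - 1, in (-1, 0] */
  const float y = emo / (emo + 2.0f);  /* tanh(-|x|), in (-1, 0] */
  return copysignf(y, x);              /* recover the sign of x */
}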
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x48.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x48( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); i += 48; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); const __m128 vn0_hi = 
_mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const 
__m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); o += 48; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, 
vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
12395
41.597938
125
c
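Each kernel reconstructs its scale factor s without a float-to-int round trip: the magic-bias constant is picked so that, after the fused multiply-add, the low bits of vn already encode the biased exponent of the required power of two, and the left shift by 23 (performed separately on each 128-bit half, since this code path has no 256-bit integer shift) reinterprets them directly as the float s. A scalar sketch of the underlying bit trick, assuming n is already an integer in the normal-exponent range (the helper name is illustrative, not part of the library):

#include <stdint.h>
#include <string.h>

/* Build the float 2^n directly from its IEEE-754 bit pattern:
   exponent field = n + 127, zero mantissa. Valid for -126 <= n <= 127. */
static float pow2_from_integer(int n) {
  const uint32_t bits = (uint32_t) (n + 127) << 23;
  float s;
  memcpy(&s, &bits, sizeof s);
  return s;
}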
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x56.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x56( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); i += 56; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); vz4 = 
_mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); vz6 = _mm256_max_ps(vsat_cutoff, vz6); __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; __m256 vp6 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp6 = _mm256_fmadd_ps(vp6, vt6, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, 
vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m256 vy6 = _mm256_div_ps(vemo6, vepo6); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); o += 56; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 
23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
13680
42.570064
125
c
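The sign handling works purely on the half-precision bit patterns: OR-ing in the sign bit (vabsx = vx | vsign_mask) forces the argument negative, and the XOR of the original and forced-negative patterns (vinvsignx) records whether the result must be flipped back after the conversion to half precision. A scalar sketch for one uint16_t lane, assuming y_neg_bits already holds tanh(-|x|) rounded to half precision (the helper name is illustrative):

#include <stdint.h>

/* Recover the bit pattern of tanh(x) from the bit pattern of tanh(-|x|). */
static uint16_t restore_tanh_sign(uint16_t x_bits, uint16_t y_neg_bits) {
  const uint16_t sign_mask = UINT16_C(0x8000);
  const uint16_t absx_bits = (uint16_t) (x_bits | sign_mask);  /* bits of -|x| */
  const uint16_t invsign   = (uint16_t) (x_bits ^ absx_bits);  /* 0x8000 iff x had sign bit 0 */
  return (uint16_t) (y_neg_bits ^ invsign);                    /* flip the sign back for positive x */
}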
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x64.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x64( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); i += 64; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, 
vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); vz6 = _mm256_max_ps(vsat_cutoff, vz6); __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias); vz7 = _mm256_max_ps(vsat_cutoff, vz7); __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, 
vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6); const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; __m256 vp6 = vc3; __m256 vp7 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp6 = _mm256_fmadd_ps(vp6, vt6, vc2); vp7 = _mm256_fmadd_ps(vp7, vt7, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo); vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6); const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m256 vy6 = _mm256_div_ps(vemo6, vepo6); __m256 vy7 = _mm256_div_ps(vemo7, vepo7); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 
= _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); o += 64; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
14965
43.409496
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x72.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x72( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64)); i += 72; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); __m256 vz8 = _mm256_cvtph_ps(vabsx8); const __m128i vinvsignx8 
= _mm_xor_si128(vx8, vabsx8); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); vz6 = _mm256_max_ps(vsat_cutoff, vz6); __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias); vz7 = _mm256_max_ps(vsat_cutoff, vz7); __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias); vz8 = _mm256_max_ps(vsat_cutoff, vz8); __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1); __m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23))); vn8 = _mm256_sub_ps(vn8, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); const __m128 vs8_hi = 
_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6); const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7); const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; __m256 vp6 = vc3; __m256 vp7 = vc3; __m256 vp8 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp6 = _mm256_fmadd_ps(vp6, vt6, vc2); vp7 = _mm256_fmadd_ps(vp7, vt7, vc2); vp8 = _mm256_fmadd_ps(vp8, vt8, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo); vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo); vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vts8 = _mm256_mul_ps(vt8, vs8); const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6); const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7); const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = 
_mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m256 vy6 = _mm256_div_ps(vemo6, vepo6); __m256 vy7 = _mm256_div_ps(vemo7, vepo7); __m256 vy8 = _mm256_div_ps(vemo8, vepo8); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); __m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); vh8 = _mm_xor_si128(vh8, vinvsignx8); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); _mm_storeu_si128((__m128i*) (o + 64), vh8); o += 72; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = 
_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
16250
44.141667
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x8( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = 
_mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
4,581
32.940741
123
c
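All of the kernels collected here share one reconstruction: they evaluate tanh(x) as -expm1(-2|x|) / (expm1(-2|x|) + 2), computing expm1 with a single-step range reduction (n = round(z·log2e), t = z − n·ln2, and 2^n assembled by shifting the biased quotient's bits into the float exponent field), a low-degree polynomial in t, and a final XOR of the half-precision sign bit to restore the sign of x. The scalar function below is a minimal sketch of that identity only, assuming the libm expm1f(); it deliberately ignores the kernels' range reduction, polynomial coefficients, and bit-level tricks, and its name is invented for illustration.

#include <math.h>

// Illustrative only: the identity behind the f16 tanh kernels, with libm
// doing the work that the vector code does with a polynomial and bit tricks.
static inline float tanh_via_expm1minus(float x) {
  const float z = -2.0f * fabsf(x);   // non-positive argument of the exponential
  const float emo = expm1f(z);        // e^(-2|x|) - 1, in [-1, 0]
  const float epo = emo + 2.0f;       // e^(-2|x|) + 1
  const float y = -emo / epo;         // tanh(|x|) >= 0
  return copysignf(y, x);             // reattach the sign of the input
}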
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-div-x80.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_div_x80( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64)); const __m128i vx9 = _mm_loadu_si128((const __m128i*) (i + 72)); i += 80; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask); const __m128i vabsx9 = _mm_or_si128(vx9, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); 
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); __m256 vz8 = _mm256_cvtph_ps(vabsx8); const __m128i vinvsignx8 = _mm_xor_si128(vx8, vabsx8); __m256 vz9 = _mm256_cvtph_ps(vabsx9); const __m128i vinvsignx9 = _mm_xor_si128(vx9, vabsx9); vz0 = _mm256_max_ps(vsat_cutoff, vz0); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); vz1 = _mm256_max_ps(vsat_cutoff, vz1); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); vz2 = _mm256_max_ps(vsat_cutoff, vz2); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); vz3 = _mm256_max_ps(vsat_cutoff, vz3); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); vz4 = _mm256_max_ps(vsat_cutoff, vz4); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); vz5 = _mm256_max_ps(vsat_cutoff, vz5); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); vz6 = _mm256_max_ps(vsat_cutoff, vz6); __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias); vz7 = _mm256_max_ps(vsat_cutoff, vz7); __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias); vz8 = _mm256_max_ps(vsat_cutoff, vz8); __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias); vz9 = _mm256_max_ps(vsat_cutoff, vz9); __m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1); __m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23))); vn8 = _mm256_sub_ps(vn8, vmagic_bias); const __m128 vn9_hi = _mm256_extractf128_ps(vn9, 1); __m256 vs9 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23))); vn9 = _mm256_sub_ps(vn9, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = 
_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23)); const __m128 vs9_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn9_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1); vs9 = _mm256_insertf128_ps(vs9, vs9_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6); const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7); const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8); const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; __m256 vp6 = vc3; __m256 vp7 = vc3; __m256 vp8 = vc3; __m256 vp9 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp6 = _mm256_fmadd_ps(vp6, vt6, vc2); vp7 = _mm256_fmadd_ps(vp7, vt7, vc2); vp8 = _mm256_fmadd_ps(vp8, vt8, vc2); vp9 = _mm256_fmadd_ps(vp9, vt9, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo); vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo); vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo); vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vts8 = _mm256_mul_ps(vt8, vs8); const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one); const __m256 vts9 = 
_mm256_mul_ps(vt9, vs9); const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6); const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7); const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8); const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo); const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo); __m256 vy0 = _mm256_div_ps(vemo0, vepo0); __m256 vy1 = _mm256_div_ps(vemo1, vepo1); __m256 vy2 = _mm256_div_ps(vemo2, vepo2); __m256 vy3 = _mm256_div_ps(vemo3, vepo3); __m256 vy4 = _mm256_div_ps(vemo4, vepo4); __m256 vy5 = _mm256_div_ps(vemo5, vepo5); __m256 vy6 = _mm256_div_ps(vemo6, vepo6); __m256 vy7 = _mm256_div_ps(vemo7, vepo7); __m256 vy8 = _mm256_div_ps(vemo8, vepo8); __m256 vy9 = _mm256_div_ps(vemo9, vepo9); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); __m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT); __m128i vh9 = _mm256_cvtps_ph(vy9, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); vh8 = _mm_xor_si128(vh8, vinvsignx8); vh9 = _mm_xor_si128(vh9, vinvsignx9); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); _mm_storeu_si128((__m128i*) (o + 64), vh8); _mm_storeu_si128((__m128i*) (o + 72), vh9); o += 80; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 
vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); vz = _mm256_max_ps(vsat_cutoff, vz); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vy = _mm256_div_ps(vemo, vepo); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
17,535
44.785901
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x16( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); i += 16; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); __m256 vp0 = vc3; __m256 vp1 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 
vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); o += 16; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) 
_mm_extract_epi16(vh, 0); } } }
7,720
35.766667
125
c
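The rcp-suffixed kernels differ from the div variants in two related ways: the exact _mm256_div_ps is replaced with the _mm256_rcp_ps reciprocal approximation (roughly 11-12 bits of relative accuracy, which is enough for a half-precision result), and because the argument is no longer clamped with _mm256_max_ps, lanes at or below the saturation cutoff are instead forced to -1 with a compare-and-blend. The fragment below is a distilled sketch of that tail, assuming vemo already holds expm1(-2|x|) and vm holds the saturation mask; the helper name is made up and it is not a drop-in replacement for any generated kernel.

#include <immintrin.h>

// Sketch: reciprocal approximation plus saturation blend (rcp variants).
static inline __m256 tanh_magnitude_negated(__m256 vemo, __m256 vm) {
  const __m256 vtwo = _mm256_set1_ps(2.0f);
  const __m256 vminus_one = _mm256_set1_ps(-1.0f);
  const __m256 vepo = _mm256_add_ps(vemo, vtwo);   // expm1(-2|x|) + 2
  const __m256 vrepo = _mm256_rcp_ps(vepo);        // ~1/vepo, ~12-bit accurate
  __m256 vy = _mm256_mul_ps(vemo, vrepo);          // ~ -tanh(|x|)
  return _mm256_blendv_ps(vy, vminus_one, vm);     // saturated lanes -> -1
}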
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x24( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); i += 24; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = 
_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); o += 24; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = 
_mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
9,123
37.825532
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x32( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); i += 32; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = 
_mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); o += 32; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = 
_mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
10,526
39.488462
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x40.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x40( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); i += 40; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); 
const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, 
vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); o += 40; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = 
_mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
11,929
40.859649
125
c
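Every kernel in this family handles the final 1-7 elements with the same peeling pattern: it computes a full 8-lane result, then stores 4, 2, and finally 1 half-precision values depending on the low bits of the remaining batch size. The helper below restates that store sequence on its own, assuming vh holds the converted halves in its low lanes and batch is the leftover element count (the name and signature are invented for illustration); _mm_storeu_si32 may need the intrinsics polyfill header these files already include on older compilers.

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

// Sketch of the shared remainder store: peel 4, then 2, then 1 halves.
static inline void store_f16_tail(uint16_t* o, __m128i vh, size_t batch) {
  if (batch & 4) {
    _mm_storel_epi64((__m128i*) o, vh);          // low 64 bits = 4 halves
    vh = _mm_unpackhi_epi64(vh, vh);             // move the rest down
    o += 4;
  }
  if (batch & 2) {
    _mm_storeu_si32(o, vh);                      // next 32 bits = 2 halves
    vh = _mm_srli_epi64(vh, 32);
    o += 2;
  }
  if (batch & 1) {
    *o = (uint16_t) _mm_extract_epi16(vh, 0);    // final half
  }
}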
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x48.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x48( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); i += 48; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); 
const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 
vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); o += 48; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = 
_mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
13,332
42.009677
125
c
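// Editor's illustration (not part of XNNPACK; the helper name below is hypothetical): a minimal scalar
// sketch of the identity the expm1minus-based tanh kernels in this directory evaluate. The argument is
// first forced non-positive (vabsx ORs in the fp16 sign bit), the kernel computes
// tanh(-|x|) = expm1(-2|x|) / (expm1(-2|x|) + 2) with a short polynomial standing in for expm1 and a
// division or _mm256_rcp_ps reciprocal for the ratio, and the sign of x is restored at the end by
// XOR-ing vinvsignx into the converted fp16 result.
#include <math.h>

static float tanh_via_expm1minus(float x) {
  const float emo = expm1f(-2.0f * fabsf(x));  // e^(-2|x|) - 1, always in (-1, 0]
  const float y = emo / (emo + 2.0f);          // tanh(-|x|); denominator is >= 1, never zero
  return copysignf(-y, x);                     // put the sign of x back, as the kernels do via XOR
}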
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x56.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x56( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); i += 56; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m256 vm3 = 
_mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS); __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; __m256 vp6 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); 
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp6 = _mm256_fmadd_ps(vp6, vt6, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vrepo6 = _mm256_rcp_ps(vepo6); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); __m256 vy6 = _mm256_mul_ps(vemo6, vrepo6); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); 
_mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); o += 56; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
14,735
42.98806
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x64.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x64( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); i += 64; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, 
_CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS); __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias); const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS); __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); const __m256 
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6); const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; __m256 vp6 = vc3; __m256 vp7 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp6 = _mm256_fmadd_ps(vp6, vt6, vc2); vp7 = _mm256_fmadd_ps(vp7, vt7, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo); vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6); const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vrepo6 = _mm256_rcp_ps(vepo6); __m256 vrepo7 = _mm256_rcp_ps(vepo7); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); __m256 vy6 = _mm256_mul_ps(vemo6, vrepo6); __m256 vy7 = _mm256_mul_ps(vemo7, vrepo7); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = 
_mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6); vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); o += 64; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = 
_mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
16,138
43.830556
125
c
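Editor's note: restating the per-element algebra visible in the unrolled loops above, using the kernels' own variable names (p, t, s, z stand for vp, vt, vs, vz; no constants beyond those loaded from params are assumed):

$$\mathrm{vemo} = p(t)\cdot t\cdot s + (s - 1),\qquad \mathrm{vepo} = \mathrm{vemo} + 2,\qquad \mathrm{vy} = \frac{\mathrm{vemo}}{\mathrm{vepo}},$$

where s (vs) is a power of two rebuilt by shifting the low bits of vn into the float exponent field, t is the reduced argument from the fmadd with vminus_ln2, and p(t) is the degree-3 polynomial produced by the two fmadd steps; vy is then overwritten with -1 wherever z <= sat_cutoff, and the original sign is restored by XOR-ing vinvsignx into the fp16 result.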
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x72.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x72( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64)); i += 72; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); __m256 vz8 = _mm256_cvtph_ps(vabsx8); const __m128i vinvsignx8 
= _mm_xor_si128(vx8, vabsx8); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS); __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias); const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS); __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias); const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS); __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1); __m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23))); vn8 = _mm256_sub_ps(vn8, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); 
const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6); const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7); const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; __m256 vp6 = vc3; __m256 vp7 = vc3; __m256 vp8 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp6 = _mm256_fmadd_ps(vp6, vt6, vc2); vp7 = _mm256_fmadd_ps(vp7, vt7, vc2); vp8 = _mm256_fmadd_ps(vp8, vt8, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo); vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo); vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const __m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vts8 = _mm256_mul_ps(vt8, vs8); const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6); const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7); const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = 
_mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vrepo6 = _mm256_rcp_ps(vepo6); __m256 vrepo7 = _mm256_rcp_ps(vepo7); __m256 vrepo8 = _mm256_rcp_ps(vepo8); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); __m256 vy6 = _mm256_mul_ps(vemo6, vrepo6); __m256 vy7 = _mm256_mul_ps(vemo7, vrepo7); __m256 vy8 = _mm256_mul_ps(vemo8, vrepo8); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6); vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7); vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); __m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); vh8 = _mm_xor_si128(vh8, vinvsignx8); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); _mm_storeu_si128((__m128i*) (o + 64), vh8); o += 72; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); 
__m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
17,541
44.563636
125
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x8( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 
1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
4,809
33.113475
123
c
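The kernel above maps x to z = -|x| (by OR-ing in the f16 sign bit), evaluates e^(2z) - 1 with a degree-3 polynomial after range reduction, and reconstructs tanh as emo / (emo + 2) before XOR-ing the saved sign back into the half-precision result. A minimal scalar sketch of that reconstruction, using libm in place of the kernel's polynomial and approximate reciprocal (illustrative only, not code from the repository):

#include <math.h>

// Reference for the emo/(emo + 2) reconstruction: with z = -|x| <= 0,
// tanh(z) = (e^(2z) - 1) / (e^(2z) + 1), and the sign of x is restored last.
static float tanh_expm1_reference(float x) {
  const float z = -fabsf(x);            // non-positive half-line, like the kernel
  const float emo = expm1f(2.0f * z);   // e^(2z) - 1, in (-1, 0]
  const float epo = emo + 2.0f;         // e^(2z) + 1
  return copysignf(emo / epo, x);       // tanh(z) with the input's sign put back
}

Working on z <= 0 keeps e^(2z) in (0, 1], so nothing overflows; in this scalar version the very negative inputs that the kernel catches with its vsat_cutoff mask simply produce values that round to -1.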
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-expm1minus-rr1-p3h2ts-rcp-x80.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <stdint.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_expm1minus_rr1_p3h2ts_rcp_x80( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->avx_expm1minus_rr1_p3h2.sign_mask); const __m256 vsat_cutoff = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.sat_cutoff); const __m256 vlog2e = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.log2e); const __m256 vmagic_bias = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.magic_bias); const __m256 vminus_ln2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_ln2); const __m256 vc3 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c3); const __m256 vc2 = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.c2); const __m256 vtwo = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.two); const __m256 vminus_one = _mm256_load_ps(params->avx_expm1minus_rr1_p3h2.minus_one); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) { const __m128i vx0 = _mm_loadu_si128((const __m128i*) i); const __m128i vx1 = _mm_loadu_si128((const __m128i*) (i + 8)); const __m128i vx2 = _mm_loadu_si128((const __m128i*) (i + 16)); const __m128i vx3 = _mm_loadu_si128((const __m128i*) (i + 24)); const __m128i vx4 = _mm_loadu_si128((const __m128i*) (i + 32)); const __m128i vx5 = _mm_loadu_si128((const __m128i*) (i + 40)); const __m128i vx6 = _mm_loadu_si128((const __m128i*) (i + 48)); const __m128i vx7 = _mm_loadu_si128((const __m128i*) (i + 56)); const __m128i vx8 = _mm_loadu_si128((const __m128i*) (i + 64)); const __m128i vx9 = _mm_loadu_si128((const __m128i*) (i + 72)); i += 80; const __m128i vabsx0 = _mm_or_si128(vx0, vsign_mask); const __m128i vabsx1 = _mm_or_si128(vx1, vsign_mask); const __m128i vabsx2 = _mm_or_si128(vx2, vsign_mask); const __m128i vabsx3 = _mm_or_si128(vx3, vsign_mask); const __m128i vabsx4 = _mm_or_si128(vx4, vsign_mask); const __m128i vabsx5 = _mm_or_si128(vx5, vsign_mask); const __m128i vabsx6 = _mm_or_si128(vx6, vsign_mask); const __m128i vabsx7 = _mm_or_si128(vx7, vsign_mask); const __m128i vabsx8 = _mm_or_si128(vx8, vsign_mask); const __m128i vabsx9 = _mm_or_si128(vx9, vsign_mask); __m256 vz0 = _mm256_cvtph_ps(vabsx0); const __m128i vinvsignx0 = _mm_xor_si128(vx0, vabsx0); __m256 vz1 = _mm256_cvtph_ps(vabsx1); const __m128i vinvsignx1 = _mm_xor_si128(vx1, vabsx1); __m256 vz2 = _mm256_cvtph_ps(vabsx2); const __m128i vinvsignx2 = _mm_xor_si128(vx2, vabsx2); __m256 vz3 = _mm256_cvtph_ps(vabsx3); const __m128i vinvsignx3 = _mm_xor_si128(vx3, vabsx3); __m256 vz4 = _mm256_cvtph_ps(vabsx4); const __m128i vinvsignx4 = _mm_xor_si128(vx4, vabsx4); __m256 vz5 = _mm256_cvtph_ps(vabsx5); const __m128i vinvsignx5 = _mm_xor_si128(vx5, vabsx5); __m256 vz6 = _mm256_cvtph_ps(vabsx6); const __m128i vinvsignx6 = _mm_xor_si128(vx6, vabsx6); __m256 vz7 = _mm256_cvtph_ps(vabsx7); 
const __m128i vinvsignx7 = _mm_xor_si128(vx7, vabsx7); __m256 vz8 = _mm256_cvtph_ps(vabsx8); const __m128i vinvsignx8 = _mm_xor_si128(vx8, vabsx8); __m256 vz9 = _mm256_cvtph_ps(vabsx9); const __m128i vinvsignx9 = _mm_xor_si128(vx9, vabsx9); const __m256 vm0 = _mm256_cmp_ps(vz0, vsat_cutoff, _CMP_LE_OS); __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias); const __m256 vm1 = _mm256_cmp_ps(vz1, vsat_cutoff, _CMP_LE_OS); __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias); const __m256 vm2 = _mm256_cmp_ps(vz2, vsat_cutoff, _CMP_LE_OS); __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias); const __m256 vm3 = _mm256_cmp_ps(vz3, vsat_cutoff, _CMP_LE_OS); __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias); const __m256 vm4 = _mm256_cmp_ps(vz4, vsat_cutoff, _CMP_LE_OS); __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias); const __m256 vm5 = _mm256_cmp_ps(vz5, vsat_cutoff, _CMP_LE_OS); __m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias); const __m256 vm6 = _mm256_cmp_ps(vz6, vsat_cutoff, _CMP_LE_OS); __m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias); const __m256 vm7 = _mm256_cmp_ps(vz7, vsat_cutoff, _CMP_LE_OS); __m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias); const __m256 vm8 = _mm256_cmp_ps(vz8, vsat_cutoff, _CMP_LE_OS); __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias); const __m256 vm9 = _mm256_cmp_ps(vz9, vsat_cutoff, _CMP_LE_OS); __m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias); const __m128 vn0_hi = _mm256_extractf128_ps(vn0, 1); __m256 vs0 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23))); vn0 = _mm256_sub_ps(vn0, vmagic_bias); const __m128 vn1_hi = _mm256_extractf128_ps(vn1, 1); __m256 vs1 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23))); vn1 = _mm256_sub_ps(vn1, vmagic_bias); const __m128 vn2_hi = _mm256_extractf128_ps(vn2, 1); __m256 vs2 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23))); vn2 = _mm256_sub_ps(vn2, vmagic_bias); const __m128 vn3_hi = _mm256_extractf128_ps(vn3, 1); __m256 vs3 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23))); vn3 = _mm256_sub_ps(vn3, vmagic_bias); const __m128 vn4_hi = _mm256_extractf128_ps(vn4, 1); __m256 vs4 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23))); vn4 = _mm256_sub_ps(vn4, vmagic_bias); const __m128 vn5_hi = _mm256_extractf128_ps(vn5, 1); __m256 vs5 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23))); vn5 = _mm256_sub_ps(vn5, vmagic_bias); const __m128 vn6_hi = _mm256_extractf128_ps(vn6, 1); __m256 vs6 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23))); vn6 = _mm256_sub_ps(vn6, vmagic_bias); const __m128 vn7_hi = _mm256_extractf128_ps(vn7, 1); __m256 vs7 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23))); vn7 = _mm256_sub_ps(vn7, vmagic_bias); const __m128 vn8_hi = _mm256_extractf128_ps(vn8, 1); __m256 vs8 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23))); vn8 = _mm256_sub_ps(vn8, vmagic_bias); const __m128 vn9_hi = _mm256_extractf128_ps(vn9, 1); __m256 vs9 = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23))); vn9 = 
_mm256_sub_ps(vn9, vmagic_bias); const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0_hi), 23)); const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1_hi), 23)); const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2_hi), 23)); const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn3_hi), 23)); const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4_hi), 23)); const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn5_hi), 23)); const __m128 vs6_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn6_hi), 23)); const __m128 vs7_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn7_hi), 23)); const __m128 vs8_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn8_hi), 23)); const __m128 vs9_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn9_hi), 23)); vs0 = _mm256_insertf128_ps(vs0, vs0_hi, 1); vs1 = _mm256_insertf128_ps(vs1, vs1_hi, 1); vs2 = _mm256_insertf128_ps(vs2, vs2_hi, 1); vs3 = _mm256_insertf128_ps(vs3, vs3_hi, 1); vs4 = _mm256_insertf128_ps(vs4, vs4_hi, 1); vs5 = _mm256_insertf128_ps(vs5, vs5_hi, 1); vs6 = _mm256_insertf128_ps(vs6, vs6_hi, 1); vs7 = _mm256_insertf128_ps(vs7, vs7_hi, 1); vs8 = _mm256_insertf128_ps(vs8, vs8_hi, 1); vs9 = _mm256_insertf128_ps(vs9, vs9_hi, 1); const __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0); const __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1); const __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2); const __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3); const __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4); const __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5); const __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6); const __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7); const __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8); const __m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9); __m256 vp0 = vc3; __m256 vp1 = vc3; __m256 vp2 = vc3; __m256 vp3 = vc3; __m256 vp4 = vc3; __m256 vp5 = vc3; __m256 vp6 = vc3; __m256 vp7 = vc3; __m256 vp8 = vc3; __m256 vp9 = vc3; vp0 = _mm256_fmadd_ps(vp0, vt0, vc2); vp1 = _mm256_fmadd_ps(vp1, vt1, vc2); vp2 = _mm256_fmadd_ps(vp2, vt2, vc2); vp3 = _mm256_fmadd_ps(vp3, vt3, vc2); vp4 = _mm256_fmadd_ps(vp4, vt4, vc2); vp5 = _mm256_fmadd_ps(vp5, vt5, vc2); vp6 = _mm256_fmadd_ps(vp6, vt6, vc2); vp7 = _mm256_fmadd_ps(vp7, vt7, vc2); vp8 = _mm256_fmadd_ps(vp8, vt8, vc2); vp9 = _mm256_fmadd_ps(vp9, vt9, vc2); vp0 = _mm256_fmadd_ps(vp0, vt0, vtwo); vp1 = _mm256_fmadd_ps(vp1, vt1, vtwo); vp2 = _mm256_fmadd_ps(vp2, vt2, vtwo); vp3 = _mm256_fmadd_ps(vp3, vt3, vtwo); vp4 = _mm256_fmadd_ps(vp4, vt4, vtwo); vp5 = _mm256_fmadd_ps(vp5, vt5, vtwo); vp6 = _mm256_fmadd_ps(vp6, vt6, vtwo); vp7 = _mm256_fmadd_ps(vp7, vt7, vtwo); vp8 = _mm256_fmadd_ps(vp8, vt8, vtwo); vp9 = _mm256_fmadd_ps(vp9, vt9, vtwo); const __m256 vts0 = _mm256_mul_ps(vt0, vs0); const __m256 vsmo0 = _mm256_add_ps(vs0, vminus_one); const __m256 vts1 = _mm256_mul_ps(vt1, vs1); const __m256 vsmo1 = _mm256_add_ps(vs1, vminus_one); const __m256 vts2 = _mm256_mul_ps(vt2, vs2); const __m256 vsmo2 = _mm256_add_ps(vs2, vminus_one); const __m256 vts3 = _mm256_mul_ps(vt3, vs3); const __m256 vsmo3 = _mm256_add_ps(vs3, vminus_one); const __m256 vts4 = _mm256_mul_ps(vt4, vs4); const __m256 vsmo4 = _mm256_add_ps(vs4, vminus_one); const __m256 vts5 = _mm256_mul_ps(vt5, vs5); const __m256 vsmo5 = _mm256_add_ps(vs5, vminus_one); const __m256 vts6 = _mm256_mul_ps(vt6, vs6); const __m256 vsmo6 = _mm256_add_ps(vs6, vminus_one); const 
__m256 vts7 = _mm256_mul_ps(vt7, vs7); const __m256 vsmo7 = _mm256_add_ps(vs7, vminus_one); const __m256 vts8 = _mm256_mul_ps(vt8, vs8); const __m256 vsmo8 = _mm256_add_ps(vs8, vminus_one); const __m256 vts9 = _mm256_mul_ps(vt9, vs9); const __m256 vsmo9 = _mm256_add_ps(vs9, vminus_one); const __m256 vemo0 = _mm256_fmadd_ps(vp0, vts0, vsmo0); const __m256 vemo1 = _mm256_fmadd_ps(vp1, vts1, vsmo1); const __m256 vemo2 = _mm256_fmadd_ps(vp2, vts2, vsmo2); const __m256 vemo3 = _mm256_fmadd_ps(vp3, vts3, vsmo3); const __m256 vemo4 = _mm256_fmadd_ps(vp4, vts4, vsmo4); const __m256 vemo5 = _mm256_fmadd_ps(vp5, vts5, vsmo5); const __m256 vemo6 = _mm256_fmadd_ps(vp6, vts6, vsmo6); const __m256 vemo7 = _mm256_fmadd_ps(vp7, vts7, vsmo7); const __m256 vemo8 = _mm256_fmadd_ps(vp8, vts8, vsmo8); const __m256 vemo9 = _mm256_fmadd_ps(vp9, vts9, vsmo9); const __m256 vepo0 = _mm256_add_ps(vemo0, vtwo); const __m256 vepo1 = _mm256_add_ps(vemo1, vtwo); const __m256 vepo2 = _mm256_add_ps(vemo2, vtwo); const __m256 vepo3 = _mm256_add_ps(vemo3, vtwo); const __m256 vepo4 = _mm256_add_ps(vemo4, vtwo); const __m256 vepo5 = _mm256_add_ps(vemo5, vtwo); const __m256 vepo6 = _mm256_add_ps(vemo6, vtwo); const __m256 vepo7 = _mm256_add_ps(vemo7, vtwo); const __m256 vepo8 = _mm256_add_ps(vemo8, vtwo); const __m256 vepo9 = _mm256_add_ps(vemo9, vtwo); __m256 vrepo0 = _mm256_rcp_ps(vepo0); __m256 vrepo1 = _mm256_rcp_ps(vepo1); __m256 vrepo2 = _mm256_rcp_ps(vepo2); __m256 vrepo3 = _mm256_rcp_ps(vepo3); __m256 vrepo4 = _mm256_rcp_ps(vepo4); __m256 vrepo5 = _mm256_rcp_ps(vepo5); __m256 vrepo6 = _mm256_rcp_ps(vepo6); __m256 vrepo7 = _mm256_rcp_ps(vepo7); __m256 vrepo8 = _mm256_rcp_ps(vepo8); __m256 vrepo9 = _mm256_rcp_ps(vepo9); __m256 vy0 = _mm256_mul_ps(vemo0, vrepo0); __m256 vy1 = _mm256_mul_ps(vemo1, vrepo1); __m256 vy2 = _mm256_mul_ps(vemo2, vrepo2); __m256 vy3 = _mm256_mul_ps(vemo3, vrepo3); __m256 vy4 = _mm256_mul_ps(vemo4, vrepo4); __m256 vy5 = _mm256_mul_ps(vemo5, vrepo5); __m256 vy6 = _mm256_mul_ps(vemo6, vrepo6); __m256 vy7 = _mm256_mul_ps(vemo7, vrepo7); __m256 vy8 = _mm256_mul_ps(vemo8, vrepo8); __m256 vy9 = _mm256_mul_ps(vemo9, vrepo9); vy0 = _mm256_blendv_ps(vy0, vminus_one, vm0); vy1 = _mm256_blendv_ps(vy1, vminus_one, vm1); vy2 = _mm256_blendv_ps(vy2, vminus_one, vm2); vy3 = _mm256_blendv_ps(vy3, vminus_one, vm3); vy4 = _mm256_blendv_ps(vy4, vminus_one, vm4); vy5 = _mm256_blendv_ps(vy5, vminus_one, vm5); vy6 = _mm256_blendv_ps(vy6, vminus_one, vm6); vy7 = _mm256_blendv_ps(vy7, vminus_one, vm7); vy8 = _mm256_blendv_ps(vy8, vminus_one, vm8); vy9 = _mm256_blendv_ps(vy9, vminus_one, vm9); __m128i vh0 = _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT); __m128i vh1 = _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT); __m128i vh2 = _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT); __m128i vh3 = _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT); __m128i vh4 = _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT); __m128i vh5 = _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT); __m128i vh6 = _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT); __m128i vh7 = _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT); __m128i vh8 = _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT); __m128i vh9 = _mm256_cvtps_ph(vy9, _MM_FROUND_TO_NEAREST_INT); vh0 = _mm_xor_si128(vh0, vinvsignx0); vh1 = _mm_xor_si128(vh1, vinvsignx1); vh2 = _mm_xor_si128(vh2, vinvsignx2); vh3 = _mm_xor_si128(vh3, vinvsignx3); vh4 = _mm_xor_si128(vh4, vinvsignx4); vh5 = _mm_xor_si128(vh5, vinvsignx5); vh6 = _mm_xor_si128(vh6, vinvsignx6); vh7 = _mm_xor_si128(vh7, vinvsignx7); vh8 = 
_mm_xor_si128(vh8, vinvsignx8); vh9 = _mm_xor_si128(vh9, vinvsignx9); _mm_storeu_si128((__m128i*) o, vh0); _mm_storeu_si128((__m128i*) (o + 8), vh1); _mm_storeu_si128((__m128i*) (o + 16), vh2); _mm_storeu_si128((__m128i*) (o + 24), vh3); _mm_storeu_si128((__m128i*) (o + 32), vh4); _mm_storeu_si128((__m128i*) (o + 40), vh5); _mm_storeu_si128((__m128i*) (o + 48), vh6); _mm_storeu_si128((__m128i*) (o + 56), vh7); _mm_storeu_si128((__m128i*) (o + 64), vh8); _mm_storeu_si128((__m128i*) (o + 72), vh9); o += 80; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); i += 8; const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); _mm_storeu_si128((__m128i*) o, vh); o += 8; } if (batch != 0) { const __m128i vx = _mm_loadu_si128((const __m128i*) i); const __m128i vabsx = _mm_or_si128(vx, vsign_mask); __m256 vz = _mm256_cvtph_ps(vabsx); const __m128i vinvsignx = _mm_xor_si128(vx, vabsx); const __m256 vm = _mm256_cmp_ps(vz, vsat_cutoff, _CMP_LE_OS); __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); const __m128 vn_hi = _mm256_extractf128_ps(vn, 1); __m256 vs = _mm256_castps128_ps256(_mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23))); const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn_hi), 23)); vs = _mm256_insertf128_ps(vs, vs_hi, 1); vn = _mm256_sub_ps(vn, vmagic_bias); const __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); __m256 vp = vc3; vp = _mm256_fmadd_ps(vp, vt, vc2); vp = _mm256_fmadd_ps(vp, vt, vtwo); const __m256 vts = _mm256_mul_ps(vt, vs); const __m256 vsmo = _mm256_add_ps(vs, vminus_one); const __m256 vemo = _mm256_fmadd_ps(vp, vts, vsmo); const __m256 vepo = _mm256_add_ps(vemo, vtwo); __m256 vrepo = _mm256_rcp_ps(vepo); __m256 vy = _mm256_mul_ps(vemo, vrepo); vy = _mm256_blendv_ps(vy, vminus_one, vm); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); vh = _mm_xor_si128(vh, vinvsignx); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
18,944
45.207317
125
c
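The _rcp_ in these file names refers to the final division: (e^(2z) - 1) / (e^(2z) + 1) is formed with the roughly 12-bit _mm256_rcp_ps estimate instead of a true _mm256_div_ps, which is sufficient once the result is rounded to half precision. If single-precision accuracy were needed, the usual fix is one Newton-Raphson step on the estimate; a sketch of that step (not part of these kernels, which deliberately skip it):

#include <immintrin.h>

// One Newton-Raphson refinement of the AVX approximate reciprocal:
// r1 = r0 * (2 - d * r0), which roughly doubles the number of correct bits.
static __m256 refined_reciprocal(__m256 vd) {
  const __m256 vtwo = _mm256_set1_ps(2.0f);
  __m256 vr = _mm256_rcp_ps(vd);                           // ~12-bit estimate
  vr = _mm256_mul_ps(vr, _mm256_fnmadd_ps(vd, vr, vtwo));  // r0 * (2 - d*r0)
  return vr;
}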
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x16( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); i += 16; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); __m256 vp0 = vc19; __m256 vp1 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, vt1, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vy0 = _mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); o += 16; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = 
_mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
5,365
33.844156
95
c
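f16-vtanh-fma3-polynomial-p19h9t2-x16.c above switches strategy: the input is clamped to [neg_sat_cutoff, pos_sat_cutoff] and a degree-19 odd polynomial in x is evaluated directly, with no exponential at all. The clamp is harmless because tanh already rounds to +/-1 in half precision beyond roughly +/-4.5; a small check of that boundary (assumed derivation below — the kernel's exact cutoff constant lives in its parameter block and is not reproduced here):

#include <math.h>
#include <stdio.h>

// Half precision has 10 explicit mantissa bits, so the largest value below
// 1.0 is 1 - 2^-11, and anything above the midpoint 1 - 2^-12 rounds to 1.0.
// tanh(x) crosses that midpoint at atanh(1 - 2^-12), beyond which clamping
// the argument cannot change the rounded f16 result.
int main(void) {
  printf("f16 tanh saturates for |x| >= %.6f\n", atanh(1.0 - 0x1p-12));
  return 0;
}

Compiled with -lm this prints a bound a little above 4.505, which is why both kernel families in this listing treat inputs beyond about +/-4.5 as saturated.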
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x24( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 24 * sizeof(uint16_t); batch -= 24 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); i += 24; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); __m256 vp0 = vc19; __m256 vp1 = vc19; __m256 vp2 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp2 = _mm256_fmadd_ps(vp2, vt2, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp2 = _mm256_fmadd_ps(vp2, vt2, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp2 = _mm256_fmadd_ps(vp2, vt2, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, vt1, vc11); vp2 = _mm256_fmadd_ps(vp2, vt2, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp2 = _mm256_fmadd_ps(vp2, vt2, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp2 = _mm256_fmadd_ps(vp2, vt2, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp2 = _mm256_fmadd_ps(vp2, vt2, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); vp2 = _mm256_fmadd_ps(vp2, vt2, vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vy0 = 
_mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); const __m256 vy2 = _mm256_fmadd_ps(vp2, vxt2, vx2); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); o += 24; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
6,145
35.152941
95
c
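Per vector, the p19h9t2 evaluation is a straight Horner chain over t = x^2: p starts at c19, eight FMAs fold in c17 through c3, and the result is recombined as y = x + (x*t)*p. The same structure in scalar form (hypothetical helper; the caller supplies the nine coefficients, since the kernel's minimax values are not reproduced here):

// Odd-polynomial tanh evaluation, y = x + x*t*(c3 + c5*t + ... + c19*t^8),
// with t = x*x. c[0] holds c3 and c[8] holds c19, matching the kernel's order.
static float tanh_odd_poly(float x, const float c[9]) {
  const float t = x * x;
  float p = c[8];                 // c19
  for (int k = 7; k >= 0; --k) {
    p = p * t + c[k];             // fold in c17, c15, ..., c3
  }
  return p * (x * t) + x;         // y = x + x*t*P(t)
}

Because only odd powers of x appear, the approximation is an odd function, so negative inputs need no special handling beyond the symmetric clamp.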
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x32( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 32 * sizeof(uint16_t); batch -= 32 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); i += 32; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); __m256 vp0 = vc19; __m256 vp1 = vc19; __m256 vp2 = vc19; __m256 vp3 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp2 = _mm256_fmadd_ps(vp2, vt2, vc17); vp3 = _mm256_fmadd_ps(vp3, vt3, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp2 = _mm256_fmadd_ps(vp2, vt2, vc15); vp3 = _mm256_fmadd_ps(vp3, vt3, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp2 = _mm256_fmadd_ps(vp2, vt2, vc13); vp3 = _mm256_fmadd_ps(vp3, vt3, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, vt1, vc11); vp2 = _mm256_fmadd_ps(vp2, vt2, vc11); vp3 = _mm256_fmadd_ps(vp3, vt3, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp2 = _mm256_fmadd_ps(vp2, vt2, vc9); vp3 = _mm256_fmadd_ps(vp3, vt3, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp2 = 
_mm256_fmadd_ps(vp2, vt2, vc7); vp3 = _mm256_fmadd_ps(vp3, vt3, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp2 = _mm256_fmadd_ps(vp2, vt2, vc5); vp3 = _mm256_fmadd_ps(vp3, vt3, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); vp2 = _mm256_fmadd_ps(vp2, vt2, vc3); vp3 = _mm256_fmadd_ps(vp3, vt3, vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vy0 = _mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); const __m256 vy2 = _mm256_fmadd_ps(vp2, vxt2, vx2); const __m256 vy3 = _mm256_fmadd_ps(vp3, vxt3, vx3); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); o += 32; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
6,925
36.236559
95
c
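Every variant in this family (and in the expm1minus family above) finishes with the same remainder code: the last few halves, fewer than 8, are written with a 64-bit store, a 32-bit store, and a 16-bit extract, selected by the bits of the remaining element count. That pattern as a standalone helper (hypothetical name; _mm_storeu_si32 needs a recent compiler, which is why the kernels pull in xnnpack/intrinsics-polyfill.h):

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

// Store the low n (0 < n < 8) half-precision values held in vh to o.
static void store_f16_tail(uint16_t* o, __m128i vh, size_t n) {
  if (n & 4) {
    _mm_storel_epi64((__m128i*) o, vh);        // 4 halves = 64 bits
    vh = _mm_unpackhi_epi64(vh, vh);           // move the upper halves down
    o += 4;
  }
  if (n & 2) {
    _mm_storeu_si32(o, vh);                    // 2 halves = 32 bits
    vh = _mm_srli_epi64(vh, 32);
    o += 2;
  }
  if (n & 1) {
    *o = (uint16_t) _mm_extract_epi16(vh, 0);  // final half
  }
}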
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x40.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x40( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 40 * sizeof(uint16_t); batch -= 40 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); i += 40; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); __m256 vp0 = vc19; __m256 vp1 = vc19; __m256 vp2 = vc19; __m256 vp3 = vc19; __m256 vp4 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp2 = _mm256_fmadd_ps(vp2, vt2, vc17); vp3 = _mm256_fmadd_ps(vp3, vt3, vc17); vp4 = _mm256_fmadd_ps(vp4, vt4, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp2 = _mm256_fmadd_ps(vp2, vt2, vc15); vp3 = _mm256_fmadd_ps(vp3, vt3, vc15); vp4 = _mm256_fmadd_ps(vp4, vt4, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp2 = _mm256_fmadd_ps(vp2, vt2, vc13); vp3 = _mm256_fmadd_ps(vp3, vt3, vc13); vp4 = _mm256_fmadd_ps(vp4, vt4, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, 
vt1, vc11); vp2 = _mm256_fmadd_ps(vp2, vt2, vc11); vp3 = _mm256_fmadd_ps(vp3, vt3, vc11); vp4 = _mm256_fmadd_ps(vp4, vt4, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp2 = _mm256_fmadd_ps(vp2, vt2, vc9); vp3 = _mm256_fmadd_ps(vp3, vt3, vc9); vp4 = _mm256_fmadd_ps(vp4, vt4, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp2 = _mm256_fmadd_ps(vp2, vt2, vc7); vp3 = _mm256_fmadd_ps(vp3, vt3, vc7); vp4 = _mm256_fmadd_ps(vp4, vt4, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp2 = _mm256_fmadd_ps(vp2, vt2, vc5); vp3 = _mm256_fmadd_ps(vp3, vt3, vc5); vp4 = _mm256_fmadd_ps(vp4, vt4, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); vp2 = _mm256_fmadd_ps(vp2, vt2, vc3); vp3 = _mm256_fmadd_ps(vp3, vt3, vc3); vp4 = _mm256_fmadd_ps(vp4, vt4, vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vy0 = _mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); const __m256 vy2 = _mm256_fmadd_ps(vp2, vxt2, vx2); const __m256 vy3 = _mm256_fmadd_ps(vp3, vxt3, vx3); const __m256 vy4 = _mm256_fmadd_ps(vp4, vxt4, vx4); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); o += 40; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
7,705
37.148515
95
c
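All of the arithmetic in these files happens in single precision: each group of 8 halves is widened with _mm256_cvtph_ps, processed, and narrowed back with _mm256_cvtps_ph under round-to-nearest. The conversion round-trip in isolation, wrapped around a trivial scale (hypothetical helper, F16C required; the kernels fuse this around their tanh math):

#include <immintrin.h>
#include <stdint.h>

// Widen 8 f16 values to f32, scale them, and narrow back to f16.
static void f16_scale8(const uint16_t* in, uint16_t* out, float scale) {
  const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) in));
  const __m256 vy = _mm256_mul_ps(vx, _mm256_set1_ps(scale));
  _mm_storeu_si128((__m128i*) out, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT));
}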
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x48.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x48( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 48 * sizeof(uint16_t); batch -= 48 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); i += 48; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); __m256 vp0 = vc19; __m256 vp1 = vc19; __m256 vp2 = vc19; __m256 vp3 = vc19; __m256 vp4 = vc19; __m256 vp5 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp2 = _mm256_fmadd_ps(vp2, vt2, vc17); vp3 = _mm256_fmadd_ps(vp3, vt3, vc17); vp4 = _mm256_fmadd_ps(vp4, vt4, vc17); vp5 = _mm256_fmadd_ps(vp5, vt5, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp2 = _mm256_fmadd_ps(vp2, vt2, vc15); vp3 = _mm256_fmadd_ps(vp3, vt3, vc15); vp4 = _mm256_fmadd_ps(vp4, vt4, vc15); 
vp5 = _mm256_fmadd_ps(vp5, vt5, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp2 = _mm256_fmadd_ps(vp2, vt2, vc13); vp3 = _mm256_fmadd_ps(vp3, vt3, vc13); vp4 = _mm256_fmadd_ps(vp4, vt4, vc13); vp5 = _mm256_fmadd_ps(vp5, vt5, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, vt1, vc11); vp2 = _mm256_fmadd_ps(vp2, vt2, vc11); vp3 = _mm256_fmadd_ps(vp3, vt3, vc11); vp4 = _mm256_fmadd_ps(vp4, vt4, vc11); vp5 = _mm256_fmadd_ps(vp5, vt5, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp2 = _mm256_fmadd_ps(vp2, vt2, vc9); vp3 = _mm256_fmadd_ps(vp3, vt3, vc9); vp4 = _mm256_fmadd_ps(vp4, vt4, vc9); vp5 = _mm256_fmadd_ps(vp5, vt5, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp2 = _mm256_fmadd_ps(vp2, vt2, vc7); vp3 = _mm256_fmadd_ps(vp3, vt3, vc7); vp4 = _mm256_fmadd_ps(vp4, vt4, vc7); vp5 = _mm256_fmadd_ps(vp5, vt5, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp2 = _mm256_fmadd_ps(vp2, vt2, vc5); vp3 = _mm256_fmadd_ps(vp3, vt3, vc5); vp4 = _mm256_fmadd_ps(vp4, vt4, vc5); vp5 = _mm256_fmadd_ps(vp5, vt5, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); vp2 = _mm256_fmadd_ps(vp2, vt2, vc3); vp3 = _mm256_fmadd_ps(vp3, vt3, vc3); vp4 = _mm256_fmadd_ps(vp4, vt4, vc3); vp5 = _mm256_fmadd_ps(vp5, vt5, vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vy0 = _mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); const __m256 vy2 = _mm256_fmadd_ps(vp2, vxt2, vx2); const __m256 vy3 = _mm256_fmadd_ps(vp3, vxt3, vx3); const __m256 vy4 = _mm256_fmadd_ps(vp4, vxt4, vx4); const __m256 vy5 = _mm256_fmadd_ps(vp5, vxt5, vx5); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); o += 48; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = 
_mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
8,485
37.926606
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x56.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x56( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 56 * sizeof(uint16_t); batch -= 56 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48))); i += 56; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx6 = _mm256_max_ps(vneg_sat_cutoff, vx6); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); vx6 = _mm256_min_ps(vpos_sat_cutoff, vx6); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); const __m256 vt6 = _mm256_mul_ps(vx6, vx6); __m256 vp0 = vc19; __m256 vp1 = vc19; __m256 vp2 = vc19; __m256 vp3 = vc19; __m256 vp4 = vc19; __m256 vp5 = vc19; __m256 vp6 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp2 = _mm256_fmadd_ps(vp2, vt2, vc17); vp3 = _mm256_fmadd_ps(vp3, vt3, vc17); vp4 = _mm256_fmadd_ps(vp4, vt4, vc17); vp5 = 
_mm256_fmadd_ps(vp5, vt5, vc17); vp6 = _mm256_fmadd_ps(vp6, vt6, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp2 = _mm256_fmadd_ps(vp2, vt2, vc15); vp3 = _mm256_fmadd_ps(vp3, vt3, vc15); vp4 = _mm256_fmadd_ps(vp4, vt4, vc15); vp5 = _mm256_fmadd_ps(vp5, vt5, vc15); vp6 = _mm256_fmadd_ps(vp6, vt6, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp2 = _mm256_fmadd_ps(vp2, vt2, vc13); vp3 = _mm256_fmadd_ps(vp3, vt3, vc13); vp4 = _mm256_fmadd_ps(vp4, vt4, vc13); vp5 = _mm256_fmadd_ps(vp5, vt5, vc13); vp6 = _mm256_fmadd_ps(vp6, vt6, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, vt1, vc11); vp2 = _mm256_fmadd_ps(vp2, vt2, vc11); vp3 = _mm256_fmadd_ps(vp3, vt3, vc11); vp4 = _mm256_fmadd_ps(vp4, vt4, vc11); vp5 = _mm256_fmadd_ps(vp5, vt5, vc11); vp6 = _mm256_fmadd_ps(vp6, vt6, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp2 = _mm256_fmadd_ps(vp2, vt2, vc9); vp3 = _mm256_fmadd_ps(vp3, vt3, vc9); vp4 = _mm256_fmadd_ps(vp4, vt4, vc9); vp5 = _mm256_fmadd_ps(vp5, vt5, vc9); vp6 = _mm256_fmadd_ps(vp6, vt6, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp2 = _mm256_fmadd_ps(vp2, vt2, vc7); vp3 = _mm256_fmadd_ps(vp3, vt3, vc7); vp4 = _mm256_fmadd_ps(vp4, vt4, vc7); vp5 = _mm256_fmadd_ps(vp5, vt5, vc7); vp6 = _mm256_fmadd_ps(vp6, vt6, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp2 = _mm256_fmadd_ps(vp2, vt2, vc5); vp3 = _mm256_fmadd_ps(vp3, vt3, vc5); vp4 = _mm256_fmadd_ps(vp4, vt4, vc5); vp5 = _mm256_fmadd_ps(vp5, vt5, vc5); vp6 = _mm256_fmadd_ps(vp6, vt6, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); vp2 = _mm256_fmadd_ps(vp2, vt2, vc3); vp3 = _mm256_fmadd_ps(vp3, vt3, vc3); vp4 = _mm256_fmadd_ps(vp4, vt4, vc3); vp5 = _mm256_fmadd_ps(vp5, vt5, vc3); vp6 = _mm256_fmadd_ps(vp6, vt6, vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vxt6 = _mm256_mul_ps(vx6, vt6); const __m256 vy0 = _mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); const __m256 vy2 = _mm256_fmadd_ps(vp2, vxt2, vx2); const __m256 vy3 = _mm256_fmadd_ps(vp3, vxt3, vx3); const __m256 vy4 = _mm256_fmadd_ps(vp4, vxt4, vx4); const __m256 vy5 = _mm256_fmadd_ps(vp5, vxt5, vx5); const __m256 vy6 = _mm256_fmadd_ps(vp6, vxt6, vx6); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT)); o += 56; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = 
_mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
9,265
38.598291
95
c
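For readers comparing these generated fma3 kernels, which differ only in how many groups of 8 half-precision values they unroll per iteration, the shared data flow is easier to see in scalar form. The sketch below is illustrative only and is not part of XNNPACK: it mirrors the clamp, the t = x*x step, the Horner recurrence over c19..c3, and the final y = x + x^3 * P(x^2) combination, but it leaves the coefficients and saturation cutoffs as caller-supplied values, since the real fp16 constants are loaded from params->avx_polynomial_p19h9t2 and are not reproduced here.

// Scalar sketch of the p19h9t2 evaluation order used by the vector kernels above.
// The nine odd-polynomial coefficients (c3, c5, ..., c19) and the saturation
// cutoffs are placeholders supplied by the caller; the real values live in the
// kernel's parameter structure.
static float tanh_p19h9t2_scalar(float x, const float c[9],
                                 float neg_sat_cutoff, float pos_sat_cutoff) {
  // Clamp so the polynomial saturates outside the fitted range, as the
  // vector code does with _mm256_max_ps / _mm256_min_ps.
  if (x < neg_sat_cutoff) x = neg_sat_cutoff;
  if (x > pos_sat_cutoff) x = pos_sat_cutoff;

  const float t = x * x;          // vt  = x^2
  float p = c[8];                 // vp  = c19
  for (int k = 7; k >= 0; --k) {  // Horner steps: *t + c17, ..., down to *t + c3
    p = p * t + c[k];
  }
  const float xt = x * t;         // vxt = x^3
  return p * xt + x;              // vy  = x + x^3 * P(x^2)
}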
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x64.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x64( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 64 * sizeof(uint16_t); batch -= 64 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48))); __m256 vx7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56))); i += 64; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx6 = _mm256_max_ps(vneg_sat_cutoff, vx6); vx7 = _mm256_max_ps(vneg_sat_cutoff, vx7); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); vx6 = _mm256_min_ps(vpos_sat_cutoff, vx6); vx7 = _mm256_min_ps(vpos_sat_cutoff, vx7); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); const __m256 vt6 = _mm256_mul_ps(vx6, vx6); const __m256 vt7 = _mm256_mul_ps(vx7, vx7); __m256 vp0 = vc19; __m256 vp1 = vc19; __m256 vp2 = vc19; __m256 vp3 = vc19; __m256 vp4 = vc19; __m256 vp5 = vc19; __m256 vp6 = vc19; 
__m256 vp7 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp2 = _mm256_fmadd_ps(vp2, vt2, vc17); vp3 = _mm256_fmadd_ps(vp3, vt3, vc17); vp4 = _mm256_fmadd_ps(vp4, vt4, vc17); vp5 = _mm256_fmadd_ps(vp5, vt5, vc17); vp6 = _mm256_fmadd_ps(vp6, vt6, vc17); vp7 = _mm256_fmadd_ps(vp7, vt7, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp2 = _mm256_fmadd_ps(vp2, vt2, vc15); vp3 = _mm256_fmadd_ps(vp3, vt3, vc15); vp4 = _mm256_fmadd_ps(vp4, vt4, vc15); vp5 = _mm256_fmadd_ps(vp5, vt5, vc15); vp6 = _mm256_fmadd_ps(vp6, vt6, vc15); vp7 = _mm256_fmadd_ps(vp7, vt7, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp2 = _mm256_fmadd_ps(vp2, vt2, vc13); vp3 = _mm256_fmadd_ps(vp3, vt3, vc13); vp4 = _mm256_fmadd_ps(vp4, vt4, vc13); vp5 = _mm256_fmadd_ps(vp5, vt5, vc13); vp6 = _mm256_fmadd_ps(vp6, vt6, vc13); vp7 = _mm256_fmadd_ps(vp7, vt7, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, vt1, vc11); vp2 = _mm256_fmadd_ps(vp2, vt2, vc11); vp3 = _mm256_fmadd_ps(vp3, vt3, vc11); vp4 = _mm256_fmadd_ps(vp4, vt4, vc11); vp5 = _mm256_fmadd_ps(vp5, vt5, vc11); vp6 = _mm256_fmadd_ps(vp6, vt6, vc11); vp7 = _mm256_fmadd_ps(vp7, vt7, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp2 = _mm256_fmadd_ps(vp2, vt2, vc9); vp3 = _mm256_fmadd_ps(vp3, vt3, vc9); vp4 = _mm256_fmadd_ps(vp4, vt4, vc9); vp5 = _mm256_fmadd_ps(vp5, vt5, vc9); vp6 = _mm256_fmadd_ps(vp6, vt6, vc9); vp7 = _mm256_fmadd_ps(vp7, vt7, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp2 = _mm256_fmadd_ps(vp2, vt2, vc7); vp3 = _mm256_fmadd_ps(vp3, vt3, vc7); vp4 = _mm256_fmadd_ps(vp4, vt4, vc7); vp5 = _mm256_fmadd_ps(vp5, vt5, vc7); vp6 = _mm256_fmadd_ps(vp6, vt6, vc7); vp7 = _mm256_fmadd_ps(vp7, vt7, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp2 = _mm256_fmadd_ps(vp2, vt2, vc5); vp3 = _mm256_fmadd_ps(vp3, vt3, vc5); vp4 = _mm256_fmadd_ps(vp4, vt4, vc5); vp5 = _mm256_fmadd_ps(vp5, vt5, vc5); vp6 = _mm256_fmadd_ps(vp6, vt6, vc5); vp7 = _mm256_fmadd_ps(vp7, vt7, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); vp2 = _mm256_fmadd_ps(vp2, vt2, vc3); vp3 = _mm256_fmadd_ps(vp3, vt3, vc3); vp4 = _mm256_fmadd_ps(vp4, vt4, vc3); vp5 = _mm256_fmadd_ps(vp5, vt5, vc3); vp6 = _mm256_fmadd_ps(vp6, vt6, vc3); vp7 = _mm256_fmadd_ps(vp7, vt7, vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vxt6 = _mm256_mul_ps(vx6, vt6); const __m256 vxt7 = _mm256_mul_ps(vx7, vt7); const __m256 vy0 = _mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); const __m256 vy2 = _mm256_fmadd_ps(vp2, vxt2, vx2); const __m256 vy3 = _mm256_fmadd_ps(vp3, vxt3, vx3); const __m256 vy4 = _mm256_fmadd_ps(vp4, vxt4, vx4); const __m256 vy5 = _mm256_fmadd_ps(vp5, vxt5, vx5); const __m256 vy6 = _mm256_fmadd_ps(vp6, vxt6, vx6); const __m256 vy7 = _mm256_fmadd_ps(vp7, vxt7, vx7); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), 
_mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT)); o += 64; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
10,045
39.184
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x72.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x72( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 72 * sizeof(uint16_t); batch -= 72 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48))); __m256 vx7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56))); __m256 vx8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64))); i += 72; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx6 = _mm256_max_ps(vneg_sat_cutoff, vx6); vx7 = _mm256_max_ps(vneg_sat_cutoff, vx7); vx8 = _mm256_max_ps(vneg_sat_cutoff, vx8); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); vx6 = _mm256_min_ps(vpos_sat_cutoff, vx6); vx7 = _mm256_min_ps(vpos_sat_cutoff, vx7); vx8 = _mm256_min_ps(vpos_sat_cutoff, vx8); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = _mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); const __m256 vt6 = _mm256_mul_ps(vx6, vx6); const __m256 vt7 = 
_mm256_mul_ps(vx7, vx7); const __m256 vt8 = _mm256_mul_ps(vx8, vx8); __m256 vp0 = vc19; __m256 vp1 = vc19; __m256 vp2 = vc19; __m256 vp3 = vc19; __m256 vp4 = vc19; __m256 vp5 = vc19; __m256 vp6 = vc19; __m256 vp7 = vc19; __m256 vp8 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp2 = _mm256_fmadd_ps(vp2, vt2, vc17); vp3 = _mm256_fmadd_ps(vp3, vt3, vc17); vp4 = _mm256_fmadd_ps(vp4, vt4, vc17); vp5 = _mm256_fmadd_ps(vp5, vt5, vc17); vp6 = _mm256_fmadd_ps(vp6, vt6, vc17); vp7 = _mm256_fmadd_ps(vp7, vt7, vc17); vp8 = _mm256_fmadd_ps(vp8, vt8, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp2 = _mm256_fmadd_ps(vp2, vt2, vc15); vp3 = _mm256_fmadd_ps(vp3, vt3, vc15); vp4 = _mm256_fmadd_ps(vp4, vt4, vc15); vp5 = _mm256_fmadd_ps(vp5, vt5, vc15); vp6 = _mm256_fmadd_ps(vp6, vt6, vc15); vp7 = _mm256_fmadd_ps(vp7, vt7, vc15); vp8 = _mm256_fmadd_ps(vp8, vt8, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp2 = _mm256_fmadd_ps(vp2, vt2, vc13); vp3 = _mm256_fmadd_ps(vp3, vt3, vc13); vp4 = _mm256_fmadd_ps(vp4, vt4, vc13); vp5 = _mm256_fmadd_ps(vp5, vt5, vc13); vp6 = _mm256_fmadd_ps(vp6, vt6, vc13); vp7 = _mm256_fmadd_ps(vp7, vt7, vc13); vp8 = _mm256_fmadd_ps(vp8, vt8, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, vt1, vc11); vp2 = _mm256_fmadd_ps(vp2, vt2, vc11); vp3 = _mm256_fmadd_ps(vp3, vt3, vc11); vp4 = _mm256_fmadd_ps(vp4, vt4, vc11); vp5 = _mm256_fmadd_ps(vp5, vt5, vc11); vp6 = _mm256_fmadd_ps(vp6, vt6, vc11); vp7 = _mm256_fmadd_ps(vp7, vt7, vc11); vp8 = _mm256_fmadd_ps(vp8, vt8, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp2 = _mm256_fmadd_ps(vp2, vt2, vc9); vp3 = _mm256_fmadd_ps(vp3, vt3, vc9); vp4 = _mm256_fmadd_ps(vp4, vt4, vc9); vp5 = _mm256_fmadd_ps(vp5, vt5, vc9); vp6 = _mm256_fmadd_ps(vp6, vt6, vc9); vp7 = _mm256_fmadd_ps(vp7, vt7, vc9); vp8 = _mm256_fmadd_ps(vp8, vt8, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp2 = _mm256_fmadd_ps(vp2, vt2, vc7); vp3 = _mm256_fmadd_ps(vp3, vt3, vc7); vp4 = _mm256_fmadd_ps(vp4, vt4, vc7); vp5 = _mm256_fmadd_ps(vp5, vt5, vc7); vp6 = _mm256_fmadd_ps(vp6, vt6, vc7); vp7 = _mm256_fmadd_ps(vp7, vt7, vc7); vp8 = _mm256_fmadd_ps(vp8, vt8, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp2 = _mm256_fmadd_ps(vp2, vt2, vc5); vp3 = _mm256_fmadd_ps(vp3, vt3, vc5); vp4 = _mm256_fmadd_ps(vp4, vt4, vc5); vp5 = _mm256_fmadd_ps(vp5, vt5, vc5); vp6 = _mm256_fmadd_ps(vp6, vt6, vc5); vp7 = _mm256_fmadd_ps(vp7, vt7, vc5); vp8 = _mm256_fmadd_ps(vp8, vt8, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); vp2 = _mm256_fmadd_ps(vp2, vt2, vc3); vp3 = _mm256_fmadd_ps(vp3, vt3, vc3); vp4 = _mm256_fmadd_ps(vp4, vt4, vc3); vp5 = _mm256_fmadd_ps(vp5, vt5, vc3); vp6 = _mm256_fmadd_ps(vp6, vt6, vc3); vp7 = _mm256_fmadd_ps(vp7, vt7, vc3); vp8 = _mm256_fmadd_ps(vp8, vt8, vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vxt6 = _mm256_mul_ps(vx6, vt6); const __m256 vxt7 = _mm256_mul_ps(vx7, vt7); const __m256 vxt8 = _mm256_mul_ps(vx8, vt8); const __m256 vy0 = _mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); const __m256 vy2 = 
_mm256_fmadd_ps(vp2, vxt2, vx2); const __m256 vy3 = _mm256_fmadd_ps(vp3, vxt3, vx3); const __m256 vy4 = _mm256_fmadd_ps(vp4, vxt4, vx4); const __m256 vy5 = _mm256_fmadd_ps(vp5, vxt5, vx5); const __m256 vy6 = _mm256_fmadd_ps(vp6, vxt6, vx6); const __m256 vy7 = _mm256_fmadd_ps(vp7, vxt7, vx7); const __m256 vy8 = _mm256_fmadd_ps(vp8, vxt8, vx8); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT)); o += 72; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
10,825
39.699248
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x8( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) _mm_extract_epi16(vh, 0); } } }
3,710
31.840708
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-fma3-polynomial-p19h9t2-x80.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/avx-polynomial.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <stddef.h> #include <math.h> #include <immintrin.h> #include <immintrin.h> #include <xnnpack/common.h> #include <xnnpack/intrinsics-polyfill.h> #include <xnnpack/microparams.h> #include <xnnpack/vunary.h> void xnn_f16_vtanh_ukernel__fma3_polynomial_p19h9t2_x80( size_t batch, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(batch != 0); assert(batch % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const __m256 vneg_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.neg_sat_cutoff); const __m256 vpos_sat_cutoff = _mm256_load_ps(params->avx_polynomial_p19h9t2.pos_sat_cutoff); const __m256 vc19 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c19); const __m256 vc17 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c17); const __m256 vc15 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c15); const __m256 vc13 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c13); const __m256 vc11 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c11); const __m256 vc9 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c9); const __m256 vc7 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c7); const __m256 vc5 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c5); const __m256 vc3 = _mm256_load_ps(params->avx_polynomial_p19h9t2.c3); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; batch >= 80 * sizeof(uint16_t); batch -= 80 * sizeof(uint16_t)) { __m256 vx0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); __m256 vx1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8))); __m256 vx2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 16))); __m256 vx3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 24))); __m256 vx4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 32))); __m256 vx5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 40))); __m256 vx6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 48))); __m256 vx7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 56))); __m256 vx8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 64))); __m256 vx9 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 72))); i += 80; vx0 = _mm256_max_ps(vneg_sat_cutoff, vx0); vx1 = _mm256_max_ps(vneg_sat_cutoff, vx1); vx2 = _mm256_max_ps(vneg_sat_cutoff, vx2); vx3 = _mm256_max_ps(vneg_sat_cutoff, vx3); vx4 = _mm256_max_ps(vneg_sat_cutoff, vx4); vx5 = _mm256_max_ps(vneg_sat_cutoff, vx5); vx6 = _mm256_max_ps(vneg_sat_cutoff, vx6); vx7 = _mm256_max_ps(vneg_sat_cutoff, vx7); vx8 = _mm256_max_ps(vneg_sat_cutoff, vx8); vx9 = _mm256_max_ps(vneg_sat_cutoff, vx9); vx0 = _mm256_min_ps(vpos_sat_cutoff, vx0); vx1 = _mm256_min_ps(vpos_sat_cutoff, vx1); vx2 = _mm256_min_ps(vpos_sat_cutoff, vx2); vx3 = _mm256_min_ps(vpos_sat_cutoff, vx3); vx4 = _mm256_min_ps(vpos_sat_cutoff, vx4); vx5 = _mm256_min_ps(vpos_sat_cutoff, vx5); vx6 = _mm256_min_ps(vpos_sat_cutoff, vx6); vx7 = _mm256_min_ps(vpos_sat_cutoff, vx7); vx8 = _mm256_min_ps(vpos_sat_cutoff, vx8); vx9 = _mm256_min_ps(vpos_sat_cutoff, vx9); const __m256 vt0 = _mm256_mul_ps(vx0, vx0); const __m256 vt1 = _mm256_mul_ps(vx1, vx1); const __m256 vt2 = _mm256_mul_ps(vx2, vx2); const __m256 vt3 = 
_mm256_mul_ps(vx3, vx3); const __m256 vt4 = _mm256_mul_ps(vx4, vx4); const __m256 vt5 = _mm256_mul_ps(vx5, vx5); const __m256 vt6 = _mm256_mul_ps(vx6, vx6); const __m256 vt7 = _mm256_mul_ps(vx7, vx7); const __m256 vt8 = _mm256_mul_ps(vx8, vx8); const __m256 vt9 = _mm256_mul_ps(vx9, vx9); __m256 vp0 = vc19; __m256 vp1 = vc19; __m256 vp2 = vc19; __m256 vp3 = vc19; __m256 vp4 = vc19; __m256 vp5 = vc19; __m256 vp6 = vc19; __m256 vp7 = vc19; __m256 vp8 = vc19; __m256 vp9 = vc19; vp0 = _mm256_fmadd_ps(vp0, vt0, vc17); vp1 = _mm256_fmadd_ps(vp1, vt1, vc17); vp2 = _mm256_fmadd_ps(vp2, vt2, vc17); vp3 = _mm256_fmadd_ps(vp3, vt3, vc17); vp4 = _mm256_fmadd_ps(vp4, vt4, vc17); vp5 = _mm256_fmadd_ps(vp5, vt5, vc17); vp6 = _mm256_fmadd_ps(vp6, vt6, vc17); vp7 = _mm256_fmadd_ps(vp7, vt7, vc17); vp8 = _mm256_fmadd_ps(vp8, vt8, vc17); vp9 = _mm256_fmadd_ps(vp9, vt9, vc17); vp0 = _mm256_fmadd_ps(vp0, vt0, vc15); vp1 = _mm256_fmadd_ps(vp1, vt1, vc15); vp2 = _mm256_fmadd_ps(vp2, vt2, vc15); vp3 = _mm256_fmadd_ps(vp3, vt3, vc15); vp4 = _mm256_fmadd_ps(vp4, vt4, vc15); vp5 = _mm256_fmadd_ps(vp5, vt5, vc15); vp6 = _mm256_fmadd_ps(vp6, vt6, vc15); vp7 = _mm256_fmadd_ps(vp7, vt7, vc15); vp8 = _mm256_fmadd_ps(vp8, vt8, vc15); vp9 = _mm256_fmadd_ps(vp9, vt9, vc15); vp0 = _mm256_fmadd_ps(vp0, vt0, vc13); vp1 = _mm256_fmadd_ps(vp1, vt1, vc13); vp2 = _mm256_fmadd_ps(vp2, vt2, vc13); vp3 = _mm256_fmadd_ps(vp3, vt3, vc13); vp4 = _mm256_fmadd_ps(vp4, vt4, vc13); vp5 = _mm256_fmadd_ps(vp5, vt5, vc13); vp6 = _mm256_fmadd_ps(vp6, vt6, vc13); vp7 = _mm256_fmadd_ps(vp7, vt7, vc13); vp8 = _mm256_fmadd_ps(vp8, vt8, vc13); vp9 = _mm256_fmadd_ps(vp9, vt9, vc13); vp0 = _mm256_fmadd_ps(vp0, vt0, vc11); vp1 = _mm256_fmadd_ps(vp1, vt1, vc11); vp2 = _mm256_fmadd_ps(vp2, vt2, vc11); vp3 = _mm256_fmadd_ps(vp3, vt3, vc11); vp4 = _mm256_fmadd_ps(vp4, vt4, vc11); vp5 = _mm256_fmadd_ps(vp5, vt5, vc11); vp6 = _mm256_fmadd_ps(vp6, vt6, vc11); vp7 = _mm256_fmadd_ps(vp7, vt7, vc11); vp8 = _mm256_fmadd_ps(vp8, vt8, vc11); vp9 = _mm256_fmadd_ps(vp9, vt9, vc11); vp0 = _mm256_fmadd_ps(vp0, vt0, vc9); vp1 = _mm256_fmadd_ps(vp1, vt1, vc9); vp2 = _mm256_fmadd_ps(vp2, vt2, vc9); vp3 = _mm256_fmadd_ps(vp3, vt3, vc9); vp4 = _mm256_fmadd_ps(vp4, vt4, vc9); vp5 = _mm256_fmadd_ps(vp5, vt5, vc9); vp6 = _mm256_fmadd_ps(vp6, vt6, vc9); vp7 = _mm256_fmadd_ps(vp7, vt7, vc9); vp8 = _mm256_fmadd_ps(vp8, vt8, vc9); vp9 = _mm256_fmadd_ps(vp9, vt9, vc9); vp0 = _mm256_fmadd_ps(vp0, vt0, vc7); vp1 = _mm256_fmadd_ps(vp1, vt1, vc7); vp2 = _mm256_fmadd_ps(vp2, vt2, vc7); vp3 = _mm256_fmadd_ps(vp3, vt3, vc7); vp4 = _mm256_fmadd_ps(vp4, vt4, vc7); vp5 = _mm256_fmadd_ps(vp5, vt5, vc7); vp6 = _mm256_fmadd_ps(vp6, vt6, vc7); vp7 = _mm256_fmadd_ps(vp7, vt7, vc7); vp8 = _mm256_fmadd_ps(vp8, vt8, vc7); vp9 = _mm256_fmadd_ps(vp9, vt9, vc7); vp0 = _mm256_fmadd_ps(vp0, vt0, vc5); vp1 = _mm256_fmadd_ps(vp1, vt1, vc5); vp2 = _mm256_fmadd_ps(vp2, vt2, vc5); vp3 = _mm256_fmadd_ps(vp3, vt3, vc5); vp4 = _mm256_fmadd_ps(vp4, vt4, vc5); vp5 = _mm256_fmadd_ps(vp5, vt5, vc5); vp6 = _mm256_fmadd_ps(vp6, vt6, vc5); vp7 = _mm256_fmadd_ps(vp7, vt7, vc5); vp8 = _mm256_fmadd_ps(vp8, vt8, vc5); vp9 = _mm256_fmadd_ps(vp9, vt9, vc5); vp0 = _mm256_fmadd_ps(vp0, vt0, vc3); vp1 = _mm256_fmadd_ps(vp1, vt1, vc3); vp2 = _mm256_fmadd_ps(vp2, vt2, vc3); vp3 = _mm256_fmadd_ps(vp3, vt3, vc3); vp4 = _mm256_fmadd_ps(vp4, vt4, vc3); vp5 = _mm256_fmadd_ps(vp5, vt5, vc3); vp6 = _mm256_fmadd_ps(vp6, vt6, vc3); vp7 = _mm256_fmadd_ps(vp7, vt7, vc3); vp8 = _mm256_fmadd_ps(vp8, vt8, vc3); vp9 = _mm256_fmadd_ps(vp9, vt9, 
vc3); const __m256 vxt0 = _mm256_mul_ps(vx0, vt0); const __m256 vxt1 = _mm256_mul_ps(vx1, vt1); const __m256 vxt2 = _mm256_mul_ps(vx2, vt2); const __m256 vxt3 = _mm256_mul_ps(vx3, vt3); const __m256 vxt4 = _mm256_mul_ps(vx4, vt4); const __m256 vxt5 = _mm256_mul_ps(vx5, vt5); const __m256 vxt6 = _mm256_mul_ps(vx6, vt6); const __m256 vxt7 = _mm256_mul_ps(vx7, vt7); const __m256 vxt8 = _mm256_mul_ps(vx8, vt8); const __m256 vxt9 = _mm256_mul_ps(vx9, vt9); const __m256 vy0 = _mm256_fmadd_ps(vp0, vxt0, vx0); const __m256 vy1 = _mm256_fmadd_ps(vp1, vxt1, vx1); const __m256 vy2 = _mm256_fmadd_ps(vp2, vxt2, vx2); const __m256 vy3 = _mm256_fmadd_ps(vp3, vxt3, vx3); const __m256 vy4 = _mm256_fmadd_ps(vp4, vxt4, vx4); const __m256 vy5 = _mm256_fmadd_ps(vp5, vxt5, vx5); const __m256 vy6 = _mm256_fmadd_ps(vp6, vxt6, vx6); const __m256 vy7 = _mm256_fmadd_ps(vp7, vxt7, vx7); const __m256 vy8 = _mm256_fmadd_ps(vp8, vxt8, vx8); const __m256 vy9 = _mm256_fmadd_ps(vp9, vxt9, vx9); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy0, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vy1, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 16), _mm256_cvtps_ph(vy2, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 24), _mm256_cvtps_ph(vy3, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 32), _mm256_cvtps_ph(vy4, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 40), _mm256_cvtps_ph(vy5, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 48), _mm256_cvtps_ph(vy6, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 56), _mm256_cvtps_ph(vy7, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 64), _mm256_cvtps_ph(vy8, _MM_FROUND_TO_NEAREST_INT)); _mm_storeu_si128((__m128i*) (o + 72), _mm256_cvtps_ph(vy9, _MM_FROUND_TO_NEAREST_INT)); o += 80; } for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); i += 8; vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT)); o += 8; } if (batch != 0) { __m256 vx = _mm256_cvtph_ps(_mm_load_si128((const __m128i*) i)); vx = _mm256_max_ps(vneg_sat_cutoff, vx); vx = _mm256_min_ps(vpos_sat_cutoff, vx); const __m256 vt = _mm256_mul_ps(vx, vx); __m256 vp = vc19; vp = _mm256_fmadd_ps(vp, vt, vc17); vp = _mm256_fmadd_ps(vp, vt, vc15); vp = _mm256_fmadd_ps(vp, vt, vc13); vp = _mm256_fmadd_ps(vp, vt, vc11); vp = _mm256_fmadd_ps(vp, vt, vc9); vp = _mm256_fmadd_ps(vp, vt, vc7); vp = _mm256_fmadd_ps(vp, vt, vc5); vp = _mm256_fmadd_ps(vp, vt, vc3); const __m256 vxt = _mm256_mul_ps(vx, vt); const __m256 vy = _mm256_fmadd_ps(vp, vxt, vx); __m128i vh = _mm256_cvtps_ph(vy, _MM_FROUND_TO_NEAREST_INT); if (batch & (4 * sizeof(uint16_t))) { _mm_storel_epi64((__m128i*) o, vh); vh = _mm_unpackhi_epi64(vh, vh); o += 4; } if (batch & (2 * sizeof(uint16_t))) { _mm_storeu_si32(o, vh); vh = _mm_srli_epi64(vh, 32); o += 2; } if (batch & (1 * sizeof(uint16_t))) { *o = (uint16_t) 
_mm_extract_epi16(vh, 0); } } }
11,605
40.156028
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x16( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 2 * sizeof(float16x8_t); n -= 2 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = 
vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
6,107
36.703704
95
c
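The neonfp16arith expm1minus_rr1_p3h2ts_nr1fma kernels that follow take a different route from the fp32-based polynomial kernels above: they stay in half precision throughout, clamp |x| against a saturation cutoff, split the scaled argument into an integer-like part (moved into the fp16 exponent field with vshlq_n_s16 to form a power of two) and a remainder t via a magic-bias round, evaluate a short polynomial in t, reassemble the quantities named vemo and vepo (an expm1 value and that value plus two), divide them using vrecpeq_f16 refined by one Newton-Raphson fma step, and finally restore the sign of x with vbslq_f16. The scalar sketch below only illustrates the tanh identity this exploits; it is not the kernel's actual arithmetic and uses libm's expm1f in place of the fixed-point range reduction.

#include <math.h>

// Identity behind the expm1minus kernels (illustrative, and it assumes the
// usual scaling in which the exponential argument is -2|x|):
//   tanh(x) = sign(x) * -expm1(-2|x|) / (expm1(-2|x|) + 2)
// The vector code approximates the expm1 term with range reduction plus a
// low-degree polynomial, replaces the division by a reciprocal estimate plus
// one Newton-Raphson step, and restores the sign by bit-selecting the sign
// bit from x rather than calling copysign.
static float tanh_via_expm1minus(float x) {
  const float z = fabsf(x);             // work on |x|, restore the sign at the end
  const float emo = expm1f(-2.0f * z);  // e^(-2|x|) - 1
  const float epo = emo + 2.0f;         // e^(-2|x|) + 1
  const float y = -emo / epo;           // tanh(|x|)
  return copysignf(y, x);               // reattach the original sign
}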
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x24( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 3 * sizeof(float16x8_t); n -= 3 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); float16x8_t 
vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); const float16x8_t verepo2 = vfmaq_f16(vminus_one, vrepo2, vepo2); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); vrepo2 = vfmsq_f16(vrepo2, vrepo2, verepo2); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
7,127
38.381215
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x32( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 4 * sizeof(float16x8_t); n -= 4 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = 
vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); const float16x8_t verepo2 = vfmaq_f16(vminus_one, vrepo2, vepo2); const float16x8_t verepo3 = vfmaq_f16(vminus_one, vrepo3, vepo3); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); vrepo2 = vfmsq_f16(vrepo2, vrepo2, verepo2); vrepo3 = vfmsq_f16(vrepo3, vrepo3, verepo3); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); 
float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
8,147
39.74
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x40.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x40( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 5 * sizeof(float16x8_t); n -= 5 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = 
vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); const float16x8_t verepo2 = vfmaq_f16(vminus_one, vrepo2, vepo2); const float16x8_t verepo3 = vfmaq_f16(vminus_one, vrepo3, vepo3); const float16x8_t verepo4 = vfmaq_f16(vminus_one, vrepo4, vepo4); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); vrepo2 = vfmsq_f16(vrepo2, vrepo2, verepo2); vrepo3 = vfmsq_f16(vrepo3, vrepo3, verepo3); vrepo4 = vfmsq_f16(vrepo4, vrepo4, verepo4); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = 
vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
9,167
40.863014
95
c
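All of the neonfp16arith kernels that follow share one algorithm: for z = |x| they evaluate emo = expm1(-2z) with a degree-3 polynomial ("p3") after range reduction, form epo = emo + 2 = exp(-2z) + 1, take the ratio emo/epo, and splice the sign bit of x back into the result with VBSL. A minimal scalar sketch of that reconstruction, using libm's expm1f in place of the kernels' fp16 polynomial (the helper name is illustrative only, not an XNNPACK function):

#include <math.h>

/* Illustrative scalar reference for the expm1minus reconstruction. */
static float tanh_expm1minus_ref(float x) {
  /* Work on -2*|x| so the exponential never overflows; the kernels also
   * clamp |x| to ~4.51 (0x4482 in fp16), beyond which fp16 tanh saturates. */
  const float z = fabsf(x);
  const float emo = expm1f(-2.0f * z);   /* exp(-2z) - 1, in (-1, 0] */
  const float epo = emo + 2.0f;          /* exp(-2z) + 1, in (1, 2]  */
  const float y = emo / epo;             /* equals -tanh(z)          */
  return copysignf(fabsf(y), x);         /* restore the sign of x    */
}

The "div" variants of these kernels perform the emo/epo division directly; the "nr1fma" variants in this group replace it with a reciprocal estimate refined by one Newton-Raphson step (see the sketch further below).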
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x48.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x48( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 6 * sizeof(float16x8_t); n -= 6 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, 
vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); const float16x8_t verepo2 = vfmaq_f16(vminus_one, vrepo2, vepo2); const float16x8_t verepo3 = vfmaq_f16(vminus_one, vrepo3, vepo3); const float16x8_t verepo4 = vfmaq_f16(vminus_one, vrepo4, vepo4); const float16x8_t verepo5 = vfmaq_f16(vminus_one, vrepo5, vepo5); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); vrepo2 = vfmsq_f16(vrepo2, vrepo2, verepo2); vrepo3 = vfmsq_f16(vrepo3, vrepo3, verepo3); vrepo4 = vfmsq_f16(vrepo4, vrepo4, verepo4); vrepo5 = vfmsq_f16(vrepo5, vrepo5, verepo5); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, 
vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
10,187
41.806723
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x56.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x56( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 7 * sizeof(float16x8_t); n -= 7 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); float16x8_t vz6 = vabsq_f16(vx6); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); vz6 = vminq_f16(vz6, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); 
vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10)); vn6 = vsubq_f16(vn6, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); vp6 = vfmsq_f16(vtwo, vp6, vt6); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vts6 = vmulq_f16(vt6, vs6); const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); float16x8_t vrepo6 = vrecpeq_f16(vepo6); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); const float16x8_t verepo2 = vfmaq_f16(vminus_one, vrepo2, vepo2); const float16x8_t verepo3 = vfmaq_f16(vminus_one, vrepo3, vepo3); const float16x8_t verepo4 = vfmaq_f16(vminus_one, vrepo4, vepo4); const float16x8_t verepo5 = vfmaq_f16(vminus_one, vrepo5, vepo5); const float16x8_t verepo6 = vfmaq_f16(vminus_one, vrepo6, vepo6); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); vrepo2 = vfmsq_f16(vrepo2, vrepo2, verepo2); vrepo3 = vfmsq_f16(vrepo3, vrepo3, verepo3); 
vrepo4 = vfmsq_f16(vrepo4, vrepo4, verepo4); vrepo5 = vfmsq_f16(vrepo5, vrepo5, verepo5); vrepo6 = vfmsq_f16(vrepo6, vrepo6, verepo6); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); float16x8_t vy6 = vmulq_f16(vemo6, vrepo6); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vy6 = vbslq_f16(vsign_mask, vx6, vy6); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
11,207
42.610895
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x64.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x64( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 8 * sizeof(float16x8_t); n -= 8 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); float16x8_t vz6 = vabsq_f16(vx6); float16x8_t vz7 = vabsq_f16(vx7); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); vz6 = vminq_f16(vz6, vsat_cutoff); vz7 = vminq_f16(vz7, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e); float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 
= vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10)); vn6 = vsubq_f16(vn6, vmagic_bias); const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10)); vn7 = vsubq_f16(vn7, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2); const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6); float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); vp6 = vfmsq_f16(vtwo, vp6, vt6); vp7 = vfmsq_f16(vtwo, vp7, vt7); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vts6 = vmulq_f16(vt6, vs6); const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one); const float16x8_t vts7 = vmulq_f16(vt7, vs7); const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6); const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo); const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); float16x8_t vrepo6 = 
vrecpeq_f16(vepo6); float16x8_t vrepo7 = vrecpeq_f16(vepo7); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); const float16x8_t verepo2 = vfmaq_f16(vminus_one, vrepo2, vepo2); const float16x8_t verepo3 = vfmaq_f16(vminus_one, vrepo3, vepo3); const float16x8_t verepo4 = vfmaq_f16(vminus_one, vrepo4, vepo4); const float16x8_t verepo5 = vfmaq_f16(vminus_one, vrepo5, vepo5); const float16x8_t verepo6 = vfmaq_f16(vminus_one, vrepo6, vepo6); const float16x8_t verepo7 = vfmaq_f16(vminus_one, vrepo7, vepo7); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); vrepo2 = vfmsq_f16(vrepo2, vrepo2, verepo2); vrepo3 = vfmsq_f16(vrepo3, vrepo3, verepo3); vrepo4 = vfmsq_f16(vrepo4, vrepo4, verepo4); vrepo5 = vfmsq_f16(vrepo5, vrepo5, verepo5); vrepo6 = vfmsq_f16(vrepo6, vrepo6, verepo6); vrepo7 = vfmsq_f16(vrepo7, vrepo7, verepo7); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); float16x8_t vy6 = vmulq_f16(vemo6, vrepo6); float16x8_t vy7 = vmulq_f16(vemo7, vrepo7); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vy6 = vbslq_f16(vsign_mask, vx6, vy6); vy7 = vbslq_f16(vsign_mask, vx7, vy7); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = 
vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
12,227
43.304348
95
c
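The nr1fma suffix refers to how the kernels above invert epo = exp(-2z) + 1: they start from the hardware reciprocal estimate (vrecpeq_f16) and refine it with a single fused-multiply-add Newton-Raphson step, computing e = r*epo - 1 and then r - r*e. A scalar sketch of that refinement step (a standalone illustrative helper, not an XNNPACK API):

#include <math.h>

/* One Newton-Raphson step for r ~= 1/d, written in the fused form used by
 * the kernels: e = r*d - 1, then r - r*e. */
static float refine_reciprocal_once(float r, float d) {
  const float e = fmaf(r, d, -1.0f);   /* residual r*d - 1              */
  return fmaf(-r, e, r);               /* r*(2 - r*d), i.e. one NR step */
}

One such step roughly doubles the number of correct bits in the estimate, which is sufficient here because the final result only needs fp16 (11-bit) precision.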
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x72.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x72( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 9 * sizeof(float16x8_t); n -= 9 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); float16x8_t vz6 = vabsq_f16(vx6); float16x8_t vz7 = vabsq_f16(vx7); float16x8_t vz8 = vabsq_f16(vx8); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); vz6 = vminq_f16(vz6, vsat_cutoff); vz7 = vminq_f16(vz7, vsat_cutoff); vz8 = vminq_f16(vz8, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e); float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e); float16x8_t vn8 = vfmaq_f16(vmagic_bias, vz8, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = 
vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10)); vn6 = vsubq_f16(vn6, vmagic_bias); const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10)); vn7 = vsubq_f16(vn7, vmagic_bias); const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10)); vn8 = vsubq_f16(vn8, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2); const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2); const float16x8_t vt8 = vfmaq_f16(vz8, vn8, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6); float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7); float16x8_t vp8 = vfmaq_f16(vc2, vc3, vt8); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); vp6 = vfmsq_f16(vtwo, vp6, vt6); vp7 = vfmsq_f16(vtwo, vp7, vt7); vp8 = vfmsq_f16(vtwo, vp8, vt8); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vts6 = vmulq_f16(vt6, vs6); const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one); const float16x8_t vts7 = vmulq_f16(vt7, vs7); const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one); const float16x8_t vts8 = vmulq_f16(vt8, vs8); const float16x8_t vsmo8 = vaddq_f16(vs8, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6); const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7); const float16x8_t vemo8 = vfmsq_f16(vsmo8, vp8, vts8); const 
float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo); const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo); const float16x8_t vepo8 = vaddq_f16(vemo8, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); float16x8_t vrepo6 = vrecpeq_f16(vepo6); float16x8_t vrepo7 = vrecpeq_f16(vepo7); float16x8_t vrepo8 = vrecpeq_f16(vepo8); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); const float16x8_t verepo2 = vfmaq_f16(vminus_one, vrepo2, vepo2); const float16x8_t verepo3 = vfmaq_f16(vminus_one, vrepo3, vepo3); const float16x8_t verepo4 = vfmaq_f16(vminus_one, vrepo4, vepo4); const float16x8_t verepo5 = vfmaq_f16(vminus_one, vrepo5, vepo5); const float16x8_t verepo6 = vfmaq_f16(vminus_one, vrepo6, vepo6); const float16x8_t verepo7 = vfmaq_f16(vminus_one, vrepo7, vepo7); const float16x8_t verepo8 = vfmaq_f16(vminus_one, vrepo8, vepo8); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); vrepo2 = vfmsq_f16(vrepo2, vrepo2, verepo2); vrepo3 = vfmsq_f16(vrepo3, vrepo3, verepo3); vrepo4 = vfmsq_f16(vrepo4, vrepo4, verepo4); vrepo5 = vfmsq_f16(vrepo5, vrepo5, verepo5); vrepo6 = vfmsq_f16(vrepo6, vrepo6, verepo6); vrepo7 = vfmsq_f16(vrepo7, vrepo7, verepo7); vrepo8 = vfmsq_f16(vrepo8, vrepo8, verepo8); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); float16x8_t vy6 = vmulq_f16(vemo6, vrepo6); float16x8_t vy7 = vmulq_f16(vemo7, vrepo7); float16x8_t vy8 = vmulq_f16(vemo8, vrepo8); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vy6 = vbslq_f16(vsign_mask, vx6, vy6); vy7 = vbslq_f16(vsign_mask, vx7, vy7); vy8 = vbslq_f16(vsign_mask, vx8, vy8); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy8)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, 
vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
13,247
43.908475
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x8( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, 
vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
3,981
34.553571
93
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1fma-x80.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1fma_x80( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 10 * sizeof(float16x8_t); n -= 10 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); float16x8_t vz6 = vabsq_f16(vx6); float16x8_t vz7 = vabsq_f16(vx7); float16x8_t vz8 = vabsq_f16(vx8); float16x8_t vz9 = vabsq_f16(vx9); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); vz6 = vminq_f16(vz6, vsat_cutoff); vz7 = vminq_f16(vz7, vsat_cutoff); vz8 = vminq_f16(vz8, vsat_cutoff); vz9 = vminq_f16(vz9, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e); float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e); float16x8_t vn8 = vfmaq_f16(vmagic_bias, vz8, vminus_log2e); float16x8_t vn9 = vfmaq_f16(vmagic_bias, vz9, vminus_log2e); 
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10)); vn6 = vsubq_f16(vn6, vmagic_bias); const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10)); vn7 = vsubq_f16(vn7, vmagic_bias); const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10)); vn8 = vsubq_f16(vn8, vmagic_bias); const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10)); vn9 = vsubq_f16(vn9, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2); const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2); const float16x8_t vt8 = vfmaq_f16(vz8, vn8, vln2); const float16x8_t vt9 = vfmaq_f16(vz9, vn9, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6); float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7); float16x8_t vp8 = vfmaq_f16(vc2, vc3, vt8); float16x8_t vp9 = vfmaq_f16(vc2, vc3, vt9); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); vp6 = vfmsq_f16(vtwo, vp6, vt6); vp7 = vfmsq_f16(vtwo, vp7, vt7); vp8 = vfmsq_f16(vtwo, vp8, vt8); vp9 = vfmsq_f16(vtwo, vp9, vt9); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vts6 = vmulq_f16(vt6, vs6); const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one); const float16x8_t vts7 = vmulq_f16(vt7, vs7); const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one); const float16x8_t vts8 = vmulq_f16(vt8, vs8); const float16x8_t vsmo8 = vaddq_f16(vs8, vminus_one); const float16x8_t vts9 = vmulq_f16(vt9, vs9); const float16x8_t vsmo9 = vaddq_f16(vs9, vminus_one); 
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6); const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7); const float16x8_t vemo8 = vfmsq_f16(vsmo8, vp8, vts8); const float16x8_t vemo9 = vfmsq_f16(vsmo9, vp9, vts9); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo); const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo); const float16x8_t vepo8 = vaddq_f16(vemo8, vtwo); const float16x8_t vepo9 = vaddq_f16(vemo9, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); float16x8_t vrepo6 = vrecpeq_f16(vepo6); float16x8_t vrepo7 = vrecpeq_f16(vepo7); float16x8_t vrepo8 = vrecpeq_f16(vepo8); float16x8_t vrepo9 = vrecpeq_f16(vepo9); const float16x8_t verepo0 = vfmaq_f16(vminus_one, vrepo0, vepo0); const float16x8_t verepo1 = vfmaq_f16(vminus_one, vrepo1, vepo1); const float16x8_t verepo2 = vfmaq_f16(vminus_one, vrepo2, vepo2); const float16x8_t verepo3 = vfmaq_f16(vminus_one, vrepo3, vepo3); const float16x8_t verepo4 = vfmaq_f16(vminus_one, vrepo4, vepo4); const float16x8_t verepo5 = vfmaq_f16(vminus_one, vrepo5, vepo5); const float16x8_t verepo6 = vfmaq_f16(vminus_one, vrepo6, vepo6); const float16x8_t verepo7 = vfmaq_f16(vminus_one, vrepo7, vepo7); const float16x8_t verepo8 = vfmaq_f16(vminus_one, vrepo8, vepo8); const float16x8_t verepo9 = vfmaq_f16(vminus_one, vrepo9, vepo9); vrepo0 = vfmsq_f16(vrepo0, vrepo0, verepo0); vrepo1 = vfmsq_f16(vrepo1, vrepo1, verepo1); vrepo2 = vfmsq_f16(vrepo2, vrepo2, verepo2); vrepo3 = vfmsq_f16(vrepo3, vrepo3, verepo3); vrepo4 = vfmsq_f16(vrepo4, vrepo4, verepo4); vrepo5 = vfmsq_f16(vrepo5, vrepo5, verepo5); vrepo6 = vfmsq_f16(vrepo6, vrepo6, verepo6); vrepo7 = vfmsq_f16(vrepo7, vrepo7, verepo7); vrepo8 = vfmsq_f16(vrepo8, vrepo8, verepo8); vrepo9 = vfmsq_f16(vrepo9, vrepo9, verepo9); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); float16x8_t vy6 = vmulq_f16(vemo6, vrepo6); float16x8_t vy7 = vmulq_f16(vemo7, vrepo7); float16x8_t vy8 = vmulq_f16(vemo8, vrepo8); float16x8_t vy9 = vmulq_f16(vemo9, vrepo9); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vy6 = vbslq_f16(vsign_mask, vx6, vy6); vy7 = vbslq_f16(vsign_mask, vx7, vy7); vy8 = vbslq_f16(vsign_mask, vx8, vy8); vy9 = vbslq_f16(vsign_mask, vx9, vy9); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; 
vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy8)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy9)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vfmaq_f16(vminus_one, vrepo, vepo); vrepo = vfmsq_f16(vrepo, vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
14,269
44.44586
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x16( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 2 * sizeof(float16x8_t); n -= 2 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; 
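// Same computation as the unrolled loop above, applied to a single vector of 8 half-precision elements.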
float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
6,039
36.283951
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x24( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 3 * sizeof(float16x8_t); n -= 3 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); float16x8_t 
vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); const float16x8_t verepo2 = vrecpsq_f16(vrepo2, vepo2); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); vrepo2 = vmulq_f16(vrepo2, verepo2); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
7,041
37.906077
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x32( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 4 * sizeof(float16x8_t); n -= 4 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = 
vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); const float16x8_t verepo2 = vrecpsq_f16(vrepo2, vepo2); const float16x8_t verepo3 = vrecpsq_f16(vrepo3, vepo3); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); vrepo2 = vmulq_f16(vrepo2, verepo2); vrepo3 = vmulq_f16(vrepo3, verepo3); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); 
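// Store the final 1-7 elements in chunks of 4, 2, and 1, advancing vy_lo past the lanes already written.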
if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
8,043
39.22
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x40.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x40( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 5 * sizeof(float16x8_t); n -= 5 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = 
vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); const float16x8_t verepo2 = vrecpsq_f16(vrepo2, vepo2); const float16x8_t verepo3 = vrecpsq_f16(vrepo3, vepo3); const float16x8_t verepo4 = vrecpsq_f16(vrepo4, vepo4); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); vrepo2 = vmulq_f16(vrepo2, verepo2); vrepo3 = vmulq_f16(vrepo3, verepo3); vrepo4 = vmulq_f16(vrepo4, verepo4); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, 
vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
9,045
40.305936
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x48.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x48( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 6 * sizeof(float16x8_t); n -= 6 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vt0 = 
vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); const float16x8_t verepo2 = vrecpsq_f16(vrepo2, vepo2); const float16x8_t verepo3 = vrecpsq_f16(vrepo3, vepo3); const float16x8_t verepo4 = vrecpsq_f16(vrepo4, vepo4); const float16x8_t verepo5 = vrecpsq_f16(vrepo5, vepo5); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); vrepo2 = vmulq_f16(vrepo2, verepo2); vrepo3 = vmulq_f16(vrepo3, verepo3); vrepo4 = vmulq_f16(vrepo4, verepo4); vrepo5 = vmulq_f16(vrepo5, verepo5); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, 
vreinterpretq_u16_f16(vy5)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
10,047
41.218487
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x56.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x56( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 7 * sizeof(float16x8_t); n -= 7 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); float16x8_t vz6 = vabsq_f16(vx6); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); vz6 = vminq_f16(vz6, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); 
vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10)); vn6 = vsubq_f16(vn6, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); vp6 = vfmsq_f16(vtwo, vp6, vt6); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vts6 = vmulq_f16(vt6, vs6); const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); float16x8_t vrepo6 = vrecpeq_f16(vepo6); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); const float16x8_t verepo2 = vrecpsq_f16(vrepo2, vepo2); const float16x8_t verepo3 = vrecpsq_f16(vrepo3, vepo3); const float16x8_t verepo4 = vrecpsq_f16(vrepo4, vepo4); const float16x8_t verepo5 = vrecpsq_f16(vrepo5, vepo5); const float16x8_t verepo6 = vrecpsq_f16(vrepo6, vepo6); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); vrepo2 = vmulq_f16(vrepo2, verepo2); vrepo3 = vmulq_f16(vrepo3, verepo3); vrepo4 = vmulq_f16(vrepo4, verepo4); vrepo5 = vmulq_f16(vrepo5, verepo5); vrepo6 = vmulq_f16(vrepo6, 
verepo6); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); float16x8_t vy6 = vmulq_f16(vemo6, vrepo6); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vy6 = vbslq_f16(vsign_mask, vx6, vy6); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
11,049
41.996109
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x64.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x64( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 8 * sizeof(float16x8_t); n -= 8 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); float16x8_t vz6 = vabsq_f16(vx6); float16x8_t vz7 = vabsq_f16(vx7); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); vz6 = vminq_f16(vz6, vsat_cutoff); vz7 = vminq_f16(vz7, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e); float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t 
vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10)); vn6 = vsubq_f16(vn6, vmagic_bias); const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10)); vn7 = vsubq_f16(vn7, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2); const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6); float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); vp6 = vfmsq_f16(vtwo, vp6, vt6); vp7 = vfmsq_f16(vtwo, vp7, vt7); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vts6 = vmulq_f16(vt6, vs6); const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one); const float16x8_t vts7 = vmulq_f16(vt7, vs7); const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6); const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo); const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); float16x8_t vrepo6 = 
vrecpeq_f16(vepo6); float16x8_t vrepo7 = vrecpeq_f16(vepo7); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); const float16x8_t verepo2 = vrecpsq_f16(vrepo2, vepo2); const float16x8_t verepo3 = vrecpsq_f16(vrepo3, vepo3); const float16x8_t verepo4 = vrecpsq_f16(vrepo4, vepo4); const float16x8_t verepo5 = vrecpsq_f16(vrepo5, vepo5); const float16x8_t verepo6 = vrecpsq_f16(vrepo6, vepo6); const float16x8_t verepo7 = vrecpsq_f16(vrepo7, vepo7); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); vrepo2 = vmulq_f16(vrepo2, verepo2); vrepo3 = vmulq_f16(vrepo3, verepo3); vrepo4 = vmulq_f16(vrepo4, verepo4); vrepo5 = vmulq_f16(vrepo5, verepo5); vrepo6 = vmulq_f16(vrepo6, verepo6); vrepo7 = vmulq_f16(vrepo7, verepo7); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); float16x8_t vy6 = vmulq_f16(vemo6, vrepo6); float16x8_t vy7 = vmulq_f16(vemo7, vrepo7); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vy6 = vbslq_f16(vsign_mask, vx6, vy6); vy7 = vbslq_f16(vsign_mask, vx7, vy7); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, 
vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
12,051
42.666667
95
c
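All of the f16-vtanh variants collected here implement the same reconstruction, visible in the code above: z = |x| is clamped at the saturation cutoff 0x4482 (about 4.51, beyond which fp16 tanh is flat to working precision), a short polynomial in the reduced argument (the p3 in the file names) yields emo, an approximation of expm1(-2z), epo = emo + 2 is formed, the quotient emo/epo equals -tanh(z), and the sign bit of x is copied back in with vbslq_f16. The snippet below is a minimal single-precision sketch of that identity, for illustration only: expm1f and copysignf from libm stand in for the kernels' fp16 polynomial and bit-level tricks, and the cutoff constant mirrors 0x4482.

// Scalar reference for the expm1-minus tanh reconstruction used by these kernels.
// Sketch only: the real kernels work in fp16 and replace expm1f with a fitted
// degree-3 polynomial plus the magic-bias exponent trick.
#include <math.h>
#include <stdio.h>

static float tanh_expm1minus_ref(float x) {
  const float z = fminf(fabsf(x), 4.5078125f);  // saturation cutoff, mirrors fp16 0x4482
  const float emo = expm1f(-2.0f * z);          // e^(-2z) - 1, in [-1, 0]
  const float epo = emo + 2.0f;                 // e^(-2z) + 1
  const float y = -emo / epo;                   // tanh(|x|)
  return copysignf(y, x);                       // restore the sign of x
}

int main(void) {
  for (float x = -3.0f; x <= 3.0f; x += 1.5f) {
    printf("x=%+.2f  ref=%+.6f  libm=%+.6f\n", x, tanh_expm1minus_ref(x), tanhf(x));
  }
  return 0;
}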
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x72.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x72( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 9 * sizeof(float16x8_t); n -= 9 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); float16x8_t vz6 = vabsq_f16(vx6); float16x8_t vz7 = vabsq_f16(vx7); float16x8_t vz8 = vabsq_f16(vx8); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); vz6 = vminq_f16(vz6, vsat_cutoff); vz7 = vminq_f16(vz7, vsat_cutoff); vz8 = vminq_f16(vz8, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e); float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e); float16x8_t vn8 = vfmaq_f16(vmagic_bias, vz8, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = 
vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10)); vn6 = vsubq_f16(vn6, vmagic_bias); const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10)); vn7 = vsubq_f16(vn7, vmagic_bias); const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10)); vn8 = vsubq_f16(vn8, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2); const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2); const float16x8_t vt8 = vfmaq_f16(vz8, vn8, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6); float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7); float16x8_t vp8 = vfmaq_f16(vc2, vc3, vt8); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); vp6 = vfmsq_f16(vtwo, vp6, vt6); vp7 = vfmsq_f16(vtwo, vp7, vt7); vp8 = vfmsq_f16(vtwo, vp8, vt8); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vts6 = vmulq_f16(vt6, vs6); const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one); const float16x8_t vts7 = vmulq_f16(vt7, vs7); const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one); const float16x8_t vts8 = vmulq_f16(vt8, vs8); const float16x8_t vsmo8 = vaddq_f16(vs8, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6); const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7); const float16x8_t vemo8 = vfmsq_f16(vsmo8, vp8, vts8); const 
float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo); const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo); const float16x8_t vepo8 = vaddq_f16(vemo8, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); float16x8_t vrepo6 = vrecpeq_f16(vepo6); float16x8_t vrepo7 = vrecpeq_f16(vepo7); float16x8_t vrepo8 = vrecpeq_f16(vepo8); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); const float16x8_t verepo2 = vrecpsq_f16(vrepo2, vepo2); const float16x8_t verepo3 = vrecpsq_f16(vrepo3, vepo3); const float16x8_t verepo4 = vrecpsq_f16(vrepo4, vepo4); const float16x8_t verepo5 = vrecpsq_f16(vrepo5, vepo5); const float16x8_t verepo6 = vrecpsq_f16(vrepo6, vepo6); const float16x8_t verepo7 = vrecpsq_f16(vrepo7, vepo7); const float16x8_t verepo8 = vrecpsq_f16(vrepo8, vepo8); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); vrepo2 = vmulq_f16(vrepo2, verepo2); vrepo3 = vmulq_f16(vrepo3, verepo3); vrepo4 = vmulq_f16(vrepo4, verepo4); vrepo5 = vmulq_f16(vrepo5, verepo5); vrepo6 = vmulq_f16(vrepo6, verepo6); vrepo7 = vmulq_f16(vrepo7, verepo7); vrepo8 = vmulq_f16(vrepo8, verepo8); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); float16x8_t vy6 = vmulq_f16(vemo6, vrepo6); float16x8_t vy7 = vmulq_f16(vemo7, vrepo7); float16x8_t vy8 = vmulq_f16(vemo8, vrepo8); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vy6 = vbslq_f16(vsign_mask, vx6, vy6); vy7 = vbslq_f16(vsign_mask, vx7, vy7); vy8 = vbslq_f16(vsign_mask, vx8, vy8); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy8)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); 
float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
13,053
43.250847
95
c
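The nr1recps variants above avoid a true division: vrecpeq_f16 produces a rough reciprocal of epo and a single vrecpsq_f16/vmulq_f16 pair applies one Newton-Raphson step (vrecpsq_f16(r, d) computes 2 - d*r, so r*(2 - d*r) roughly squares the relative error of the estimate). A scalar sketch of that step follows; the deliberately perturbed starting value is only a stand-in for the hardware estimate.

// One Newton-Raphson refinement of a reciprocal estimate, the scalar analogue of
// the vrecpeq_f16 + vrecpsq_f16 sequence in the *-nr1recps-* kernels (sketch only).
#include <stdio.h>

int main(void) {
  const float d = 1.7f;            // example denominator (plays the role of epo = emo + 2)
  float r = (1.0f / d) * 1.03f;    // stand-in for the low-precision vrecpe estimate
  printf("relative error before: %g\n", r * d - 1.0f);
  r *= (2.0f - d * r);             // vrecps step: the relative error is squared
  printf("relative error after:  %g\n", r * d - 1.0f);
  return 0;
}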
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x8.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x8( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = 
vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
3,949
34.267857
93
c
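A hypothetical call site for the x8 variant above, sketched from nothing more than the signature and asserts visible in the code: n is a byte count and must be a multiple of sizeof(uint16_t); input and output hold raw IEEE fp16 bit patterns; and, because the tail path above still issues a full 8-lane vld1q_u16 (hence the XNN_OOB_READS annotation), the input buffer is padded here so the over-read stays in bounds. The params argument is never dereferenced in this variant, so a zeroed struct is passed purely to satisfy the signature.

// Hypothetical usage sketch. The XNNPACK headers below are assumed to provide the
// microkernel declaration and union xnn_f16_tanh_params.
#include <stdint.h>
#include <string.h>

#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>

#define N 13  // arbitrary element count, deliberately not a multiple of 8

static uint16_t input[N + 8];  // padded: the tail path loads a full vector of 8
static uint16_t output[N];     // stores are exact, so no padding is needed here

void run_f16_tanh(void) {
  union xnn_f16_tanh_params params;
  memset(&params, 0, sizeof(params));  // unused by this variant (constants are hard-coded)
  xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x8(
      N * sizeof(uint16_t), input, output, &params);
}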
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-nr1recps-x80.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_nr1recps_x80( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 10 * sizeof(float16x8_t); n -= 10 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); float16x8_t vz4 = vabsq_f16(vx4); float16x8_t vz5 = vabsq_f16(vx5); float16x8_t vz6 = vabsq_f16(vx6); float16x8_t vz7 = vabsq_f16(vx7); float16x8_t vz8 = vabsq_f16(vx8); float16x8_t vz9 = vabsq_f16(vx9); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); vz4 = vminq_f16(vz4, vsat_cutoff); vz5 = vminq_f16(vz5, vsat_cutoff); vz6 = vminq_f16(vz6, vsat_cutoff); vz7 = vminq_f16(vz7, vsat_cutoff); vz8 = vminq_f16(vz8, vsat_cutoff); vz9 = vminq_f16(vz9, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e); float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e); float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e); float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e); float16x8_t vn8 = vfmaq_f16(vmagic_bias, vz8, vminus_log2e); float16x8_t vn9 = vfmaq_f16(vmagic_bias, vz9, vminus_log2e); 
const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10)); vn4 = vsubq_f16(vn4, vmagic_bias); const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10)); vn5 = vsubq_f16(vn5, vmagic_bias); const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10)); vn6 = vsubq_f16(vn6, vmagic_bias); const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10)); vn7 = vsubq_f16(vn7, vmagic_bias); const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10)); vn8 = vsubq_f16(vn8, vmagic_bias); const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10)); vn9 = vsubq_f16(vn9, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2); const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2); const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2); const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2); const float16x8_t vt8 = vfmaq_f16(vz8, vn8, vln2); const float16x8_t vt9 = vfmaq_f16(vz9, vn9, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4); float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5); float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6); float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7); float16x8_t vp8 = vfmaq_f16(vc2, vc3, vt8); float16x8_t vp9 = vfmaq_f16(vc2, vc3, vt9); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); vp4 = vfmsq_f16(vtwo, vp4, vt4); vp5 = vfmsq_f16(vtwo, vp5, vt5); vp6 = vfmsq_f16(vtwo, vp6, vt6); vp7 = vfmsq_f16(vtwo, vp7, vt7); vp8 = vfmsq_f16(vtwo, vp8, vt8); vp9 = vfmsq_f16(vtwo, vp9, vt9); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vts4 = vmulq_f16(vt4, vs4); const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one); const float16x8_t vts5 = vmulq_f16(vt5, vs5); const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one); const float16x8_t vts6 = vmulq_f16(vt6, vs6); const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one); const float16x8_t vts7 = vmulq_f16(vt7, vs7); const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one); const float16x8_t vts8 = vmulq_f16(vt8, vs8); const float16x8_t vsmo8 = vaddq_f16(vs8, vminus_one); const float16x8_t vts9 = vmulq_f16(vt9, vs9); const float16x8_t vsmo9 = vaddq_f16(vs9, vminus_one); 
const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4); const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5); const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6); const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7); const float16x8_t vemo8 = vfmsq_f16(vsmo8, vp8, vts8); const float16x8_t vemo9 = vfmsq_f16(vsmo9, vp9, vts9); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo); const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo); const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo); const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo); const float16x8_t vepo8 = vaddq_f16(vemo8, vtwo); const float16x8_t vepo9 = vaddq_f16(vemo9, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vrepo4 = vrecpeq_f16(vepo4); float16x8_t vrepo5 = vrecpeq_f16(vepo5); float16x8_t vrepo6 = vrecpeq_f16(vepo6); float16x8_t vrepo7 = vrecpeq_f16(vepo7); float16x8_t vrepo8 = vrecpeq_f16(vepo8); float16x8_t vrepo9 = vrecpeq_f16(vepo9); const float16x8_t verepo0 = vrecpsq_f16(vrepo0, vepo0); const float16x8_t verepo1 = vrecpsq_f16(vrepo1, vepo1); const float16x8_t verepo2 = vrecpsq_f16(vrepo2, vepo2); const float16x8_t verepo3 = vrecpsq_f16(vrepo3, vepo3); const float16x8_t verepo4 = vrecpsq_f16(vrepo4, vepo4); const float16x8_t verepo5 = vrecpsq_f16(vrepo5, vepo5); const float16x8_t verepo6 = vrecpsq_f16(vrepo6, vepo6); const float16x8_t verepo7 = vrecpsq_f16(vrepo7, vepo7); const float16x8_t verepo8 = vrecpsq_f16(vrepo8, vepo8); const float16x8_t verepo9 = vrecpsq_f16(vrepo9, vepo9); vrepo0 = vmulq_f16(vrepo0, verepo0); vrepo1 = vmulq_f16(vrepo1, verepo1); vrepo2 = vmulq_f16(vrepo2, verepo2); vrepo3 = vmulq_f16(vrepo3, verepo3); vrepo4 = vmulq_f16(vrepo4, verepo4); vrepo5 = vmulq_f16(vrepo5, verepo5); vrepo6 = vmulq_f16(vrepo6, verepo6); vrepo7 = vmulq_f16(vrepo7, verepo7); vrepo8 = vmulq_f16(vrepo8, verepo8); vrepo9 = vmulq_f16(vrepo9, verepo9); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); float16x8_t vy4 = vmulq_f16(vemo4, vrepo4); float16x8_t vy5 = vmulq_f16(vemo5, vrepo5); float16x8_t vy6 = vmulq_f16(vemo6, vrepo6); float16x8_t vy7 = vmulq_f16(vemo7, vrepo7); float16x8_t vy8 = vmulq_f16(vemo8, vrepo8); float16x8_t vy9 = vmulq_f16(vemo9, vrepo9); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vy4 = vbslq_f16(vsign_mask, vx4, vy4); vy5 = vbslq_f16(vsign_mask, vx5, vy5); vy6 = vbslq_f16(vsign_mask, vx6, vy6); vy7 = vbslq_f16(vsign_mask, vx7, vy7); vy8 = vbslq_f16(vsign_mask, vx8, vy8); vy9 = vbslq_f16(vsign_mask, vx9, vy9); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8; vst1q_u16(o, 
vreinterpretq_u16_f16(vy6)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy8)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy9)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); const float16x8_t verepo = vrecpsq_f16(vrepo, vepo); vrepo = vmulq_f16(vrepo, verepo); float16x8_t vy = vmulq_f16(vemo, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
14,057
43.770701
95
c
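The scale factor vs above is built without a float-to-int conversion: adding the magic bias to the product z*vminus_log2e forces the rounded result into the low mantissa bits of vn, vshlq_n_s16(..., 10) moves those bits into the fp16 exponent field to form vs, and subtracting the bias back off recovers n for the vt = z + n*ln2 reduction. The snippet below shows the basic form of this trick in single precision, as an illustration only; the bias constant here is a hypothetical fp32 analogue (the kernels use the fp16 bias 0x620F with a 10-bit shift, a variant of the same idea), and the trick relies on the default round-to-nearest mode.

// Magic-bias construction of 2^round(v) in fp32 (illustrative sketch only).
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float exp2_round(float v, float* n) {
  const float magic_bias = 12583039.0f;  // bit pattern 0x4B40007F (hypothetical fp32 analogue)
  float vn = v + magic_bias;             // round(v) now sits in the low mantissa bits
  uint32_t bits;
  memcpy(&bits, &vn, sizeof(bits));
  const uint32_t sbits = bits << 23;     // exponent field becomes round(v) + 127
  float vs;
  memcpy(&vs, &sbits, sizeof(vs));
  *n = vn - magic_bias;                  // recover round(v) as a float
  return vs;                             // equals 2^round(v) for moderate |v|
}

int main(void) {
  float n;
  const float vs = exp2_round(-3.3f, &n);
  printf("n=%g  2^n=%g\n", n, vs);       // prints n=-3  2^n=0.125
  return 0;
}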
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x16.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x16( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 2 * sizeof(float16x8_t); n -= 2 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0); const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1); vy0 = vfmaq_f16(vy0, vey0, vrepo0); vy1 = vfmaq_f16(vy1, vey1, vrepo1); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; 
float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); float16x8_t vy = vmulq_f16(vemo, vrepo); const float16x8_t vey = vfmsq_f16(vemo, vy, vepo); vy = vfmaq_f16(vy, vey, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); float16x8_t vy = vmulq_f16(vemo, vrepo); const float16x8_t vey = vfmsq_f16(vemo, vy, vepo); vy = vfmaq_f16(vy, vey, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
6,027
36.209877
95
c
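The recpeadj variants above take a different route to the quotient emo/epo than the nr1recps ones: the raw vrecpeq_f16 estimate is used directly to form y = emo*repo, and a single correction based on the division residual ey = emo - y*epo is then added back (vy = vfmaq_f16(vy, vey, vrepo)), rather than refining the reciprocal first. A scalar sketch follows, with a perturbed reciprocal standing in for the hardware estimate.

// Residual-adjusted division, the scalar analogue of the *-recpeadj-* kernels (sketch only).
#include <stdio.h>

int main(void) {
  const float emo = -0.4f;                  // plays the role of expm1(-2z)
  const float epo = emo + 2.0f;             // plays the role of expm1(-2z) + 2
  const float repo = (1.0f / epo) * 0.97f;  // stand-in for the vrecpeq_f16 estimate
  float y = emo * repo;                     // rough quotient
  const float ey = emo - y * epo;           // residual of the division
  y += ey * repo;                           // one adjustment, as in vfmaq_f16(vy, vey, vrepo)
  printf("adjusted y = %g, exact = %g\n", y, emo / epo);
  return 0;
}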
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x24.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x24( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 3 * sizeof(float16x8_t); n -= 3 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); float16x8_t 
vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0); const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1); const float16x8_t vey2 = vfmsq_f16(vemo2, vy2, vepo2); vy0 = vfmaq_f16(vy0, vey0, vrepo0); vy1 = vfmaq_f16(vy1, vey1, vrepo1); vy2 = vfmaq_f16(vy2, vey2, vrepo2); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); float16x8_t vy = vmulq_f16(vemo, vrepo); const float16x8_t vey = vfmsq_f16(vemo, vy, vepo); vy = vfmaq_f16(vy, vey, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); float16x8_t vy = vmulq_f16(vemo, vrepo); const float16x8_t vey = vfmsq_f16(vemo, vy, vepo); vy = vfmaq_f16(vy, vey, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
7,027
37.828729
95
c
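All of these kernels share the same tail logic at the end of each file: one final full vector of results is computed, and the low bits of the remaining byte count n decide how many halfwords are actually stored (4 via vst1_u16, then 2 via vst1_lane_u32, then 1 via vst1_lane_u16, walking through the lanes with vget_low_f16/vget_high_f16/vext_f16). The scalar sketch below re-expresses just that store pattern; vy stands for the eight computed fp16 results as raw bits.

// Scalar re-expression of the tail-store pattern (sketch only). n is the remaining
// byte count, already known to be even and less than 16 at this point.
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void store_tail(uint16_t* o, const uint16_t vy[8], size_t n) {
  size_t lane = 0;
  if (n & (4 * sizeof(uint16_t))) {   // 8-byte bit of n set: store 4 halfwords
    memcpy(o, &vy[lane], 4 * sizeof(uint16_t));
    o += 4;
    lane += 4;
  }
  if (n & (2 * sizeof(uint16_t))) {   // 4-byte bit of n set: store 2 halfwords
    memcpy(o, &vy[lane], 2 * sizeof(uint16_t));
    o += 2;
    lane += 2;
  }
  if (n & (1 * sizeof(uint16_t))) {   // 2-byte bit of n set: store the last halfword
    memcpy(o, &vy[lane], sizeof(uint16_t));
  }
}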
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x32.c
// Auto-generated file. Do not edit! // Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in // Generator: tools/xngen // // Copyright 2023 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/common.h> #include <xnnpack/microparams.h> void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x32( size_t n, const void* input, void* output, const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(n != 0); assert(n % sizeof(uint16_t) == 0); assert(input != NULL); assert(output != NULL); const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482))); const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F))); const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5))); const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C))); const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B))); const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008))); const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000))); const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00))); const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000)); const uint16_t* i = (const uint16_t*) input; uint16_t* o = (uint16_t*) output; for (; n >= 4 * sizeof(float16x8_t); n -= 4 * sizeof(float16x8_t)) { const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz0 = vabsq_f16(vx0); float16x8_t vz1 = vabsq_f16(vx1); float16x8_t vz2 = vabsq_f16(vx2); float16x8_t vz3 = vabsq_f16(vx3); vz0 = vminq_f16(vz0, vsat_cutoff); vz1 = vminq_f16(vz1, vsat_cutoff); vz2 = vminq_f16(vz2, vsat_cutoff); vz3 = vminq_f16(vz3, vsat_cutoff); float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e); float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e); float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e); float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e); const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10)); vn0 = vsubq_f16(vn0, vmagic_bias); const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10)); vn1 = vsubq_f16(vn1, vmagic_bias); const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10)); vn2 = vsubq_f16(vn2, vmagic_bias); const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10)); vn3 = vsubq_f16(vn3, vmagic_bias); const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2); const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2); const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2); const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2); float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0); float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1); float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2); float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3); vp0 = vfmsq_f16(vtwo, vp0, vt0); vp1 = vfmsq_f16(vtwo, vp1, vt1); vp2 = vfmsq_f16(vtwo, vp2, vt2); vp3 = vfmsq_f16(vtwo, vp3, vt3); const float16x8_t vts0 = vmulq_f16(vt0, vs0); const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one); const float16x8_t vts1 = vmulq_f16(vt1, vs1); const float16x8_t vsmo1 = 
vaddq_f16(vs1, vminus_one); const float16x8_t vts2 = vmulq_f16(vt2, vs2); const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one); const float16x8_t vts3 = vmulq_f16(vt3, vs3); const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one); const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0); const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1); const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2); const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3); const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo); const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo); const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo); const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo); float16x8_t vrepo0 = vrecpeq_f16(vepo0); float16x8_t vrepo1 = vrecpeq_f16(vepo1); float16x8_t vrepo2 = vrecpeq_f16(vepo2); float16x8_t vrepo3 = vrecpeq_f16(vepo3); float16x8_t vy0 = vmulq_f16(vemo0, vrepo0); float16x8_t vy1 = vmulq_f16(vemo1, vrepo1); float16x8_t vy2 = vmulq_f16(vemo2, vrepo2); float16x8_t vy3 = vmulq_f16(vemo3, vrepo3); const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0); const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1); const float16x8_t vey2 = vfmsq_f16(vemo2, vy2, vepo2); const float16x8_t vey3 = vfmsq_f16(vemo3, vy3, vepo3); vy0 = vfmaq_f16(vy0, vey0, vrepo0); vy1 = vfmaq_f16(vy1, vey1, vrepo1); vy2 = vfmaq_f16(vy2, vey2, vrepo2); vy3 = vfmaq_f16(vy3, vey3, vrepo3); vy0 = vbslq_f16(vsign_mask, vx0, vy0); vy1 = vbslq_f16(vsign_mask, vx1, vy1); vy2 = vbslq_f16(vsign_mask, vx2, vy2); vy3 = vbslq_f16(vsign_mask, vx3, vy3); vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8; vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8; } for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); float16x8_t vy = vmulq_f16(vemo, vrepo); const float16x8_t vey = vfmsq_f16(vemo, vy, vepo); vy = vfmaq_f16(vy, vey, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8; } if (n != 0) { const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8; float16x8_t vz = vabsq_f16(vx); vz = vminq_f16(vz, vsat_cutoff); float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e); const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10)); vn = vsubq_f16(vn, vmagic_bias); const float16x8_t vt = vfmaq_f16(vz, vn, vln2); float16x8_t vp = vfmaq_f16(vc2, vc3, vt); vp = vfmsq_f16(vtwo, vp, vt); const float16x8_t vts = vmulq_f16(vt, vs); const float16x8_t vsmo = vaddq_f16(vs, vminus_one); const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts); const float16x8_t vepo = vaddq_f16(vemo, vtwo); float16x8_t vrepo = vrecpeq_f16(vepo); float16x8_t vy = vmulq_f16(vemo, vrepo); const float16x8_t vey = vfmsq_f16(vemo, vy, vepo); vy = vfmaq_f16(vy, vey, vrepo); vy = vbslq_f16(vsign_mask, vx, vy); float16x4_t vy_lo = vget_low_f16(vy); if (n & 4 * 
sizeof(uint16_t)) { vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4; vy_lo = vget_high_f16(vy); } if (n & 2 * sizeof(uint16_t)) { vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o+= 2; vy_lo = vext_f16(vy_lo, vy_lo, 2); } if (n & 1 * sizeof(uint16_t)) { vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0); } } }
8,027
39.14
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x40.c
// Auto-generated file. Do not edit!
//   Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>


void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x40(
    size_t n,
    const void* input,
    void* output,
    const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)))
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
  const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
  const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));

  const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n >= 5 * sizeof(float16x8_t); n -= 5 * sizeof(float16x8_t)) {
    const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz0 = vabsq_f16(vx0)
    float16x8_t vz1 = vabsq_f16(vx1);
    float16x8_t vz2 = vabsq_f16(vx2);
    float16x8_t vz3 = vabsq_f16(vx3);
    float16x8_t vz4 = vabsq_f16(vx4);

    vz0 = vminq_f16(vz0, vsat_cutoff);
    vz1 = vminq_f16(vz1, vsat_cutoff);
    vz2 = vminq_f16(vz2, vsat_cutoff);
    vz3 = vminq_f16(vz3, vsat_cutoff);
    vz4 = vminq_f16(vz4, vsat_cutoff);

    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);

    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    vn0 = vsubq_f16(vn0, vmagic_bias);
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    vn1 = vsubq_f16(vn1, vmagic_bias);
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    vn2 = vsubq_f16(vn2, vmagic_bias);
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    vn3 = vsubq_f16(vn3, vmagic_bias);
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    vn4 = vsubq_f16(vn4, vmagic_bias);

    const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
    const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
    const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
    const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
    const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);

    float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
    float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
    float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
    float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
    float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);

    vp0 = vfmsq_f16(vtwo, vp0, vt0);
    vp1 = vfmsq_f16(vtwo, vp1, vt1);
    vp2 = vfmsq_f16(vtwo, vp2, vt2);
    vp3 = vfmsq_f16(vtwo, vp3, vt3);
    vp4 = vfmsq_f16(vtwo, vp4, vt4);

    const float16x8_t vts0 = vmulq_f16(vt0, vs0);
    const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
    const float16x8_t vts1 = vmulq_f16(vt1, vs1);
    const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
    const float16x8_t vts2 = vmulq_f16(vt2, vs2);
    const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
    const float16x8_t vts3 = vmulq_f16(vt3, vs3);
    const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
    const float16x8_t vts4 = vmulq_f16(vt4, vs4);
    const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);

    const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
    const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
    const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
    const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
    const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);

    const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
    const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
    const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
    const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
    const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);

    float16x8_t vrepo0 = vrecpeq_f16(vepo0);
    float16x8_t vrepo1 = vrecpeq_f16(vepo1);
    float16x8_t vrepo2 = vrecpeq_f16(vepo2);
    float16x8_t vrepo3 = vrecpeq_f16(vepo3);
    float16x8_t vrepo4 = vrecpeq_f16(vepo4);

    float16x8_t vy0 = vmulq_f16(vemo0, vrepo0);
    float16x8_t vy1 = vmulq_f16(vemo1, vrepo1);
    float16x8_t vy2 = vmulq_f16(vemo2, vrepo2);
    float16x8_t vy3 = vmulq_f16(vemo3, vrepo3);
    float16x8_t vy4 = vmulq_f16(vemo4, vrepo4);

    const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0);
    const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1);
    const float16x8_t vey2 = vfmsq_f16(vemo2, vy2, vepo2);
    const float16x8_t vey3 = vfmsq_f16(vemo3, vy3, vepo3);
    const float16x8_t vey4 = vfmsq_f16(vemo4, vy4, vepo4);

    vy0 = vfmaq_f16(vy0, vey0, vrepo0);
    vy1 = vfmaq_f16(vy1, vey1, vrepo1);
    vy2 = vfmaq_f16(vy2, vey2, vrepo2);
    vy3 = vfmaq_f16(vy3, vey3, vrepo3);
    vy4 = vfmaq_f16(vy4, vey4, vrepo4);

    vy0 = vbslq_f16(vsign_mask, vx0, vy0);
    vy1 = vbslq_f16(vsign_mask, vx1, vy1);
    vy2 = vbslq_f16(vsign_mask, vx2, vy2);
    vy3 = vbslq_f16(vsign_mask, vx3, vy3);
    vy4 = vbslq_f16(vsign_mask, vx4, vy4);

    vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
  }
  for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
  if (n != 0) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    float16x4_t vy_lo = vget_low_f16(vy);
    if (n & 4 * sizeof(uint16_t)) {
      vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
      vy_lo = vget_high_f16(vy);
    }
    if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
      vy_lo = vext_f16(vy_lo, vy_lo, 2);
    }
    if (n & 1 * sizeof(uint16_t)) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
    }
  }
}
9,027
40.223744
95
c
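All of the neonfp16arith expm1minus-rr1-p3h2ts-recpeadj variants in this group evaluate the same odd-function identity: tanh(x) = -expm1(-2|x|) / (expm1(-2|x|) + 2), computing everything on |x| and merging the sign of x back in at the end (the vbslq_f16 with vsign_mask). A minimal scalar reference sketch in C, assuming only libm; the helper name is illustrative and not part of XNNPACK:

#include <math.h>

// Scalar sketch of the expm1minus reconstruction used by these kernels:
// tanh(x) = -expm1(-2|x|) / (expm1(-2|x|) + 2), with the sign restored last.
static float tanh_expm1minus_ref(float x) {
  const float z = fabsf(x);             // tanh is odd: work on |x|
  const float emo = expm1f(-2.0f * z);  // e^(-2|x|) - 1, in (-1, 0]
  const float epo = emo + 2.0f;         // e^(-2|x|) + 1
  return copysignf(-emo / epo, x);      // tanh(|x|), then reapply sign(x)
}

The vector kernels replace the expm1f call with the rr1 range reduction (the vmagic_bias / vminus_log2e / vln2 steps) and the degree-3 polynomial in vc3 and vc2.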
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x48.c
// Auto-generated file. Do not edit!
//   Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>


void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x48(
    size_t n,
    const void* input,
    void* output,
    const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
  const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
  const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));

  const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n >= 6 * sizeof(float16x8_t); n -= 6 * sizeof(float16x8_t)) {
    const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz0 = vabsq_f16(vx0);
    float16x8_t vz1 = vabsq_f16(vx1);
    float16x8_t vz2 = vabsq_f16(vx2);
    float16x8_t vz3 = vabsq_f16(vx3);
    float16x8_t vz4 = vabsq_f16(vx4);
    float16x8_t vz5 = vabsq_f16(vx5);

    vz0 = vminq_f16(vz0, vsat_cutoff);
    vz1 = vminq_f16(vz1, vsat_cutoff);
    vz2 = vminq_f16(vz2, vsat_cutoff);
    vz3 = vminq_f16(vz3, vsat_cutoff);
    vz4 = vminq_f16(vz4, vsat_cutoff);
    vz5 = vminq_f16(vz5, vsat_cutoff);

    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);

    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    vn0 = vsubq_f16(vn0, vmagic_bias);
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    vn1 = vsubq_f16(vn1, vmagic_bias);
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    vn2 = vsubq_f16(vn2, vmagic_bias);
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    vn3 = vsubq_f16(vn3, vmagic_bias);
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    vn4 = vsubq_f16(vn4, vmagic_bias);
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
    vn5 = vsubq_f16(vn5, vmagic_bias);

    const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
    const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
    const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
    const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
    const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
    const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);

    float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
    float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
    float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
    float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
    float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
    float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);

    vp0 = vfmsq_f16(vtwo, vp0, vt0);
    vp1 = vfmsq_f16(vtwo, vp1, vt1);
    vp2 = vfmsq_f16(vtwo, vp2, vt2);
    vp3 = vfmsq_f16(vtwo, vp3, vt3);
    vp4 = vfmsq_f16(vtwo, vp4, vt4);
    vp5 = vfmsq_f16(vtwo, vp5, vt5);

    const float16x8_t vts0 = vmulq_f16(vt0, vs0);
    const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
    const float16x8_t vts1 = vmulq_f16(vt1, vs1);
    const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
    const float16x8_t vts2 = vmulq_f16(vt2, vs2);
    const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
    const float16x8_t vts3 = vmulq_f16(vt3, vs3);
    const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
    const float16x8_t vts4 = vmulq_f16(vt4, vs4);
    const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
    const float16x8_t vts5 = vmulq_f16(vt5, vs5);
    const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);

    const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
    const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
    const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
    const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
    const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
    const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);

    const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
    const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
    const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
    const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
    const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
    const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);

    float16x8_t vrepo0 = vrecpeq_f16(vepo0);
    float16x8_t vrepo1 = vrecpeq_f16(vepo1);
    float16x8_t vrepo2 = vrecpeq_f16(vepo2);
    float16x8_t vrepo3 = vrecpeq_f16(vepo3);
    float16x8_t vrepo4 = vrecpeq_f16(vepo4);
    float16x8_t vrepo5 = vrecpeq_f16(vepo5);

    float16x8_t vy0 = vmulq_f16(vemo0, vrepo0);
    float16x8_t vy1 = vmulq_f16(vemo1, vrepo1);
    float16x8_t vy2 = vmulq_f16(vemo2, vrepo2);
    float16x8_t vy3 = vmulq_f16(vemo3, vrepo3);
    float16x8_t vy4 = vmulq_f16(vemo4, vrepo4);
    float16x8_t vy5 = vmulq_f16(vemo5, vrepo5);

    const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0);
    const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1);
    const float16x8_t vey2 = vfmsq_f16(vemo2, vy2, vepo2);
    const float16x8_t vey3 = vfmsq_f16(vemo3, vy3, vepo3);
    const float16x8_t vey4 = vfmsq_f16(vemo4, vy4, vepo4);
    const float16x8_t vey5 = vfmsq_f16(vemo5, vy5, vepo5);

    vy0 = vfmaq_f16(vy0, vey0, vrepo0);
    vy1 = vfmaq_f16(vy1, vey1, vrepo1);
    vy2 = vfmaq_f16(vy2, vey2, vrepo2);
    vy3 = vfmaq_f16(vy3, vey3, vrepo3);
    vy4 = vfmaq_f16(vy4, vey4, vrepo4);
    vy5 = vfmaq_f16(vy5, vey5, vrepo5);

    vy0 = vbslq_f16(vsign_mask, vx0, vy0);
    vy1 = vbslq_f16(vsign_mask, vx1, vy1);
    vy2 = vbslq_f16(vsign_mask, vx2, vy2);
    vy3 = vbslq_f16(vsign_mask, vx3, vy3);
    vy4 = vbslq_f16(vsign_mask, vx4, vy4);
    vy5 = vbslq_f16(vsign_mask, vx5, vy5);

    vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
  }
  for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
  if (n != 0) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    float16x4_t vy_lo = vget_low_f16(vy);
    if (n & 4 * sizeof(uint16_t)) {
      vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
      vy_lo = vget_high_f16(vy);
    }
    if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
      vy_lo = vext_f16(vy_lo, vy_lo, 2);
    }
    if (n & 1 * sizeof(uint16_t)) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
    }
  }
}
10,027
41.134454
95
c
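A hedged sketch of driving one of these ukernels directly, assuming an aarch64 build with fp16 arithmetic and that the declaration comes from <xnnpack/vunary.h> as for the other vtanh kernels; in practice the entry point is dispatched through XNNPACK's operator API, so the wrapper below is purely illustrative. Note XNN_OOB_READS: the tail path loads a full 8-element vector, so the input buffer must be readable slightly past the last element.

#include <stddef.h>
#include <stdint.h>

#include <xnnpack/microparams.h>
#include <xnnpack/vunary.h>

// Hypothetical direct call; `count` is the number of f16 elements.
static void tanh_f16_direct(const uint16_t* x, uint16_t* y, size_t count) {
  union xnn_f16_tanh_params params;  // this kernel variant reads no fields from it
  xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x48(
      count * sizeof(uint16_t), x, y, &params);
}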
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x56.c
// Auto-generated file. Do not edit!
//   Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>


void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x56(
    size_t n,
    const void* input,
    void* output,
    const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
  const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
  const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));

  const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n >= 7 * sizeof(float16x8_t); n -= 7 * sizeof(float16x8_t)) {
    const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz0 = vabsq_f16(vx0);
    float16x8_t vz1 = vabsq_f16(vx1);
    float16x8_t vz2 = vabsq_f16(vx2);
    float16x8_t vz3 = vabsq_f16(vx3);
    float16x8_t vz4 = vabsq_f16(vx4);
    float16x8_t vz5 = vabsq_f16(vx5);
    float16x8_t vz6 = vabsq_f16(vx6);

    vz0 = vminq_f16(vz0, vsat_cutoff);
    vz1 = vminq_f16(vz1, vsat_cutoff);
    vz2 = vminq_f16(vz2, vsat_cutoff);
    vz3 = vminq_f16(vz3, vsat_cutoff);
    vz4 = vminq_f16(vz4, vsat_cutoff);
    vz5 = vminq_f16(vz5, vsat_cutoff);
    vz6 = vminq_f16(vz6, vsat_cutoff);

    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
    float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);

    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    vn0 = vsubq_f16(vn0, vmagic_bias);
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    vn1 = vsubq_f16(vn1, vmagic_bias);
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    vn2 = vsubq_f16(vn2, vmagic_bias);
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    vn3 = vsubq_f16(vn3, vmagic_bias);
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    vn4 = vsubq_f16(vn4, vmagic_bias);
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
    vn5 = vsubq_f16(vn5, vmagic_bias);
    const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
    vn6 = vsubq_f16(vn6, vmagic_bias);

    const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
    const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
    const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
    const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
    const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
    const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
    const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2);

    float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
    float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
    float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
    float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
    float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
    float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
    float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6);

    vp0 = vfmsq_f16(vtwo, vp0, vt0);
    vp1 = vfmsq_f16(vtwo, vp1, vt1);
    vp2 = vfmsq_f16(vtwo, vp2, vt2);
    vp3 = vfmsq_f16(vtwo, vp3, vt3);
    vp4 = vfmsq_f16(vtwo, vp4, vt4);
    vp5 = vfmsq_f16(vtwo, vp5, vt5);
    vp6 = vfmsq_f16(vtwo, vp6, vt6);

    const float16x8_t vts0 = vmulq_f16(vt0, vs0);
    const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
    const float16x8_t vts1 = vmulq_f16(vt1, vs1);
    const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
    const float16x8_t vts2 = vmulq_f16(vt2, vs2);
    const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
    const float16x8_t vts3 = vmulq_f16(vt3, vs3);
    const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
    const float16x8_t vts4 = vmulq_f16(vt4, vs4);
    const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
    const float16x8_t vts5 = vmulq_f16(vt5, vs5);
    const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
    const float16x8_t vts6 = vmulq_f16(vt6, vs6);
    const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one);

    const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
    const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
    const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
    const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
    const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
    const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
    const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6);

    const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
    const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
    const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
    const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
    const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
    const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
    const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo);

    float16x8_t vrepo0 = vrecpeq_f16(vepo0);
    float16x8_t vrepo1 = vrecpeq_f16(vepo1);
    float16x8_t vrepo2 = vrecpeq_f16(vepo2);
    float16x8_t vrepo3 = vrecpeq_f16(vepo3);
    float16x8_t vrepo4 = vrecpeq_f16(vepo4);
    float16x8_t vrepo5 = vrecpeq_f16(vepo5);
    float16x8_t vrepo6 = vrecpeq_f16(vepo6);

    float16x8_t vy0 = vmulq_f16(vemo0, vrepo0);
    float16x8_t vy1 = vmulq_f16(vemo1, vrepo1);
    float16x8_t vy2 = vmulq_f16(vemo2, vrepo2);
    float16x8_t vy3 = vmulq_f16(vemo3, vrepo3);
    float16x8_t vy4 = vmulq_f16(vemo4, vrepo4);
    float16x8_t vy5 = vmulq_f16(vemo5, vrepo5);
    float16x8_t vy6 = vmulq_f16(vemo6, vrepo6);

    const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0);
    const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1);
    const float16x8_t vey2 = vfmsq_f16(vemo2, vy2, vepo2);
    const float16x8_t vey3 = vfmsq_f16(vemo3, vy3, vepo3);
    const float16x8_t vey4 = vfmsq_f16(vemo4, vy4, vepo4);
    const float16x8_t vey5 = vfmsq_f16(vemo5, vy5, vepo5);
    const float16x8_t vey6 = vfmsq_f16(vemo6, vy6, vepo6);

    vy0 = vfmaq_f16(vy0, vey0, vrepo0);
    vy1 = vfmaq_f16(vy1, vey1, vrepo1);
    vy2 = vfmaq_f16(vy2, vey2, vrepo2);
    vy3 = vfmaq_f16(vy3, vey3, vrepo3);
    vy4 = vfmaq_f16(vy4, vey4, vrepo4);
    vy5 = vfmaq_f16(vy5, vey5, vrepo5);
    vy6 = vfmaq_f16(vy6, vey6, vrepo6);

    vy0 = vbslq_f16(vsign_mask, vx0, vy0);
    vy1 = vbslq_f16(vsign_mask, vx1, vy1);
    vy2 = vbslq_f16(vsign_mask, vx2, vy2);
    vy3 = vbslq_f16(vsign_mask, vx3, vy3);
    vy4 = vbslq_f16(vsign_mask, vx4, vy4);
    vy5 = vbslq_f16(vsign_mask, vx5, vy5);
    vy6 = vbslq_f16(vsign_mask, vx6, vy6);

    vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8;
  }
  for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
  if (n != 0) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    float16x4_t vy_lo = vget_low_f16(vy);
    if (n & 4 * sizeof(uint16_t)) {
      vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
      vy_lo = vget_high_f16(vy);
    }
    if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
      vy_lo = vext_f16(vy_lo, vy_lo, 2);
    }
    if (n & 1 * sizeof(uint16_t)) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
    }
  }
}
11,027
41.910506
95
c
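The vmagic_bias / vshlq_n_s16(..., 10) pair in these kernels is the magic-bias exponentiation trick: adding a large bias forces the rounded integer part of n into the low mantissa bits, and shifting those bits up into the fp16 exponent field materializes s = 2^n without a float-to-int conversion. A float32 analogue as a sketch (shift by 23 mantissa bits instead of 10, with the IEEE exponent bias folded into the constant; assumes round-to-nearest and is not the kernels' exact fp16 constant):

#include <stdint.h>
#include <string.h>

// Sketch: compute 2^round(v) by steering round(v) into the exponent field.
static float exp2_round_via_magic_bias(float v) {
  const float magic_bias = 0x1.8000FEp+23f;  // 2^23 + 2^22 + 127 (bias folded in)
  float n = v + magic_bias;        // round(v) + 127 now sits in the low mantissa bits
  uint32_t bits;
  memcpy(&bits, &n, sizeof(bits));
  bits <<= 23;                     // low 9 bits become the biased exponent field
  float s;
  memcpy(&s, &bits, sizeof(s));    // s == 2^round(v) for round(v) in [-127, 127]
  return s;
}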
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x64.c
// Auto-generated file. Do not edit!
//   Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>


void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x64(
    size_t n,
    const void* input,
    void* output,
    const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
  const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
  const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));

  const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n >= 8 * sizeof(float16x8_t); n -= 8 * sizeof(float16x8_t)) {
    const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz0 = vabsq_f16(vx0);
    float16x8_t vz1 = vabsq_f16(vx1);
    float16x8_t vz2 = vabsq_f16(vx2);
    float16x8_t vz3 = vabsq_f16(vx3);
    float16x8_t vz4 = vabsq_f16(vx4);
    float16x8_t vz5 = vabsq_f16(vx5);
    float16x8_t vz6 = vabsq_f16(vx6);
    float16x8_t vz7 = vabsq_f16(vx7);

    vz0 = vminq_f16(vz0, vsat_cutoff);
    vz1 = vminq_f16(vz1, vsat_cutoff);
    vz2 = vminq_f16(vz2, vsat_cutoff);
    vz3 = vminq_f16(vz3, vsat_cutoff);
    vz4 = vminq_f16(vz4, vsat_cutoff);
    vz5 = vminq_f16(vz5, vsat_cutoff);
    vz6 = vminq_f16(vz6, vsat_cutoff);
    vz7 = vminq_f16(vz7, vsat_cutoff);

    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
    float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
    float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);

    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    vn0 = vsubq_f16(vn0, vmagic_bias);
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    vn1 = vsubq_f16(vn1, vmagic_bias);
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    vn2 = vsubq_f16(vn2, vmagic_bias);
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    vn3 = vsubq_f16(vn3, vmagic_bias);
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    vn4 = vsubq_f16(vn4, vmagic_bias);
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
    vn5 = vsubq_f16(vn5, vmagic_bias);
    const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
    vn6 = vsubq_f16(vn6, vmagic_bias);
    const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
    vn7 = vsubq_f16(vn7, vmagic_bias);

    const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
    const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
    const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
    const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
    const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
    const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
    const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2);
    const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2);

    float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
    float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
    float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
    float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
    float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
    float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
    float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6);
    float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7);

    vp0 = vfmsq_f16(vtwo, vp0, vt0);
    vp1 = vfmsq_f16(vtwo, vp1, vt1);
    vp2 = vfmsq_f16(vtwo, vp2, vt2);
    vp3 = vfmsq_f16(vtwo, vp3, vt3);
    vp4 = vfmsq_f16(vtwo, vp4, vt4);
    vp5 = vfmsq_f16(vtwo, vp5, vt5);
    vp6 = vfmsq_f16(vtwo, vp6, vt6);
    vp7 = vfmsq_f16(vtwo, vp7, vt7);

    const float16x8_t vts0 = vmulq_f16(vt0, vs0);
    const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
    const float16x8_t vts1 = vmulq_f16(vt1, vs1);
    const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
    const float16x8_t vts2 = vmulq_f16(vt2, vs2);
    const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
    const float16x8_t vts3 = vmulq_f16(vt3, vs3);
    const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
    const float16x8_t vts4 = vmulq_f16(vt4, vs4);
    const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
    const float16x8_t vts5 = vmulq_f16(vt5, vs5);
    const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
    const float16x8_t vts6 = vmulq_f16(vt6, vs6);
    const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one);
    const float16x8_t vts7 = vmulq_f16(vt7, vs7);
    const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one);

    const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
    const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
    const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
    const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
    const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
    const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
    const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6);
    const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7);

    const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
    const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
    const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
    const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
    const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
    const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
    const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo);
    const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo);

    float16x8_t vrepo0 = vrecpeq_f16(vepo0);
    float16x8_t vrepo1 = vrecpeq_f16(vepo1);
    float16x8_t vrepo2 = vrecpeq_f16(vepo2);
    float16x8_t vrepo3 = vrecpeq_f16(vepo3);
    float16x8_t vrepo4 = vrecpeq_f16(vepo4);
    float16x8_t vrepo5 = vrecpeq_f16(vepo5);
    float16x8_t vrepo6 = vrecpeq_f16(vepo6);
    float16x8_t vrepo7 = vrecpeq_f16(vepo7);

    float16x8_t vy0 = vmulq_f16(vemo0, vrepo0);
    float16x8_t vy1 = vmulq_f16(vemo1, vrepo1);
    float16x8_t vy2 = vmulq_f16(vemo2, vrepo2);
    float16x8_t vy3 = vmulq_f16(vemo3, vrepo3);
    float16x8_t vy4 = vmulq_f16(vemo4, vrepo4);
    float16x8_t vy5 = vmulq_f16(vemo5, vrepo5);
    float16x8_t vy6 = vmulq_f16(vemo6, vrepo6);
    float16x8_t vy7 = vmulq_f16(vemo7, vrepo7);

    const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0);
    const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1);
    const float16x8_t vey2 = vfmsq_f16(vemo2, vy2, vepo2);
    const float16x8_t vey3 = vfmsq_f16(vemo3, vy3, vepo3);
    const float16x8_t vey4 = vfmsq_f16(vemo4, vy4, vepo4);
    const float16x8_t vey5 = vfmsq_f16(vemo5, vy5, vepo5);
    const float16x8_t vey6 = vfmsq_f16(vemo6, vy6, vepo6);
    const float16x8_t vey7 = vfmsq_f16(vemo7, vy7, vepo7);

    vy0 = vfmaq_f16(vy0, vey0, vrepo0);
    vy1 = vfmaq_f16(vy1, vey1, vrepo1);
    vy2 = vfmaq_f16(vy2, vey2, vrepo2);
    vy3 = vfmaq_f16(vy3, vey3, vrepo3);
    vy4 = vfmaq_f16(vy4, vey4, vrepo4);
    vy5 = vfmaq_f16(vy5, vey5, vrepo5);
    vy6 = vfmaq_f16(vy6, vey6, vrepo6);
    vy7 = vfmaq_f16(vy7, vey7, vrepo7);

    vy0 = vbslq_f16(vsign_mask, vx0, vy0);
    vy1 = vbslq_f16(vsign_mask, vx1, vy1);
    vy2 = vbslq_f16(vsign_mask, vx2, vy2);
    vy3 = vbslq_f16(vsign_mask, vx3, vy3);
    vy4 = vbslq_f16(vsign_mask, vx4, vy4);
    vy5 = vbslq_f16(vsign_mask, vx5, vy5);
    vy6 = vbslq_f16(vsign_mask, vx6, vy6);
    vy7 = vbslq_f16(vsign_mask, vx7, vy7);

    vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8;
  }
  for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
  if (n != 0) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    float16x4_t vy_lo = vget_low_f16(vy);
    if (n & 4 * sizeof(uint16_t)) {
      vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
      vy_lo = vget_high_f16(vy);
    }
    if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
      vy_lo = vext_f16(vy_lo, vy_lo, 2);
    }
    if (n & 1 * sizeof(uint16_t)) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
    }
  }
}
12,027
42.57971
95
c
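The recpeadj suffix describes how the final division emo/epo is carried out above: vrecpeq_f16 yields a coarse reciprocal estimate, a first quotient is formed, and one residual ("adjustment") step absorbs the estimate's error (vfmsq_f16 computes the residual, vfmaq_f16 folds it back in). The same structure in scalar form, with an exact 1/epo standing in for the hardware estimate, purely for illustration:

// Scalar shape of the recpeadj division: quotient from a reciprocal
// estimate, then one residual-correction step.
static float div_recpeadj(float emo, float epo) {
  const float r = 1.0f / epo;      // stands in for vrecpeq_f16's rough estimate
  float y = emo * r;               // coarse quotient
  const float ey = emo - y * epo;  // residual of the division
  return y + ey * r;               // one adjustment step
}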
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x72.c
// Auto-generated file. Do not edit!
//   Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>


void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x72(
    size_t n,
    const void* input,
    void* output,
    const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
  const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
  const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));

  const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n >= 9 * sizeof(float16x8_t); n -= 9 * sizeof(float16x8_t)) {
    const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz0 = vabsq_f16(vx0);
    float16x8_t vz1 = vabsq_f16(vx1);
    float16x8_t vz2 = vabsq_f16(vx2);
    float16x8_t vz3 = vabsq_f16(vx3);
    float16x8_t vz4 = vabsq_f16(vx4);
    float16x8_t vz5 = vabsq_f16(vx5);
    float16x8_t vz6 = vabsq_f16(vx6);
    float16x8_t vz7 = vabsq_f16(vx7);
    float16x8_t vz8 = vabsq_f16(vx8);

    vz0 = vminq_f16(vz0, vsat_cutoff);
    vz1 = vminq_f16(vz1, vsat_cutoff);
    vz2 = vminq_f16(vz2, vsat_cutoff);
    vz3 = vminq_f16(vz3, vsat_cutoff);
    vz4 = vminq_f16(vz4, vsat_cutoff);
    vz5 = vminq_f16(vz5, vsat_cutoff);
    vz6 = vminq_f16(vz6, vsat_cutoff);
    vz7 = vminq_f16(vz7, vsat_cutoff);
    vz8 = vminq_f16(vz8, vsat_cutoff);

    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
    float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
    float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);
    float16x8_t vn8 = vfmaq_f16(vmagic_bias, vz8, vminus_log2e);

    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    vn0 = vsubq_f16(vn0, vmagic_bias);
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    vn1 = vsubq_f16(vn1, vmagic_bias);
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    vn2 = vsubq_f16(vn2, vmagic_bias);
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    vn3 = vsubq_f16(vn3, vmagic_bias);
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    vn4 = vsubq_f16(vn4, vmagic_bias);
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
    vn5 = vsubq_f16(vn5, vmagic_bias);
    const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
    vn6 = vsubq_f16(vn6, vmagic_bias);
    const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
    vn7 = vsubq_f16(vn7, vmagic_bias);
    const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
    vn8 = vsubq_f16(vn8, vmagic_bias);

    const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
    const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
    const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
    const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
    const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
    const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
    const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2);
    const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2);
    const float16x8_t vt8 = vfmaq_f16(vz8, vn8, vln2);

    float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
    float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
    float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
    float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
    float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
    float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
    float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6);
    float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7);
    float16x8_t vp8 = vfmaq_f16(vc2, vc3, vt8);

    vp0 = vfmsq_f16(vtwo, vp0, vt0);
    vp1 = vfmsq_f16(vtwo, vp1, vt1);
    vp2 = vfmsq_f16(vtwo, vp2, vt2);
    vp3 = vfmsq_f16(vtwo, vp3, vt3);
    vp4 = vfmsq_f16(vtwo, vp4, vt4);
    vp5 = vfmsq_f16(vtwo, vp5, vt5);
    vp6 = vfmsq_f16(vtwo, vp6, vt6);
    vp7 = vfmsq_f16(vtwo, vp7, vt7);
    vp8 = vfmsq_f16(vtwo, vp8, vt8);

    const float16x8_t vts0 = vmulq_f16(vt0, vs0);
    const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
    const float16x8_t vts1 = vmulq_f16(vt1, vs1);
    const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
    const float16x8_t vts2 = vmulq_f16(vt2, vs2);
    const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
    const float16x8_t vts3 = vmulq_f16(vt3, vs3);
    const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
    const float16x8_t vts4 = vmulq_f16(vt4, vs4);
    const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
    const float16x8_t vts5 = vmulq_f16(vt5, vs5);
    const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
    const float16x8_t vts6 = vmulq_f16(vt6, vs6);
    const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one);
    const float16x8_t vts7 = vmulq_f16(vt7, vs7);
    const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one);
    const float16x8_t vts8 = vmulq_f16(vt8, vs8);
    const float16x8_t vsmo8 = vaddq_f16(vs8, vminus_one);

    const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
    const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
    const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
    const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
    const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
    const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
    const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6);
    const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7);
    const float16x8_t vemo8 = vfmsq_f16(vsmo8, vp8, vts8);

    const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
    const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
    const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
    const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
    const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
    const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
    const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo);
    const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo);
    const float16x8_t vepo8 = vaddq_f16(vemo8, vtwo);

    float16x8_t vrepo0 = vrecpeq_f16(vepo0);
    float16x8_t vrepo1 = vrecpeq_f16(vepo1);
    float16x8_t vrepo2 = vrecpeq_f16(vepo2);
    float16x8_t vrepo3 = vrecpeq_f16(vepo3);
    float16x8_t vrepo4 = vrecpeq_f16(vepo4);
    float16x8_t vrepo5 = vrecpeq_f16(vepo5);
    float16x8_t vrepo6 = vrecpeq_f16(vepo6);
    float16x8_t vrepo7 = vrecpeq_f16(vepo7);
    float16x8_t vrepo8 = vrecpeq_f16(vepo8);

    float16x8_t vy0 = vmulq_f16(vemo0, vrepo0);
    float16x8_t vy1 = vmulq_f16(vemo1, vrepo1);
    float16x8_t vy2 = vmulq_f16(vemo2, vrepo2);
    float16x8_t vy3 = vmulq_f16(vemo3, vrepo3);
    float16x8_t vy4 = vmulq_f16(vemo4, vrepo4);
    float16x8_t vy5 = vmulq_f16(vemo5, vrepo5);
    float16x8_t vy6 = vmulq_f16(vemo6, vrepo6);
    float16x8_t vy7 = vmulq_f16(vemo7, vrepo7);
    float16x8_t vy8 = vmulq_f16(vemo8, vrepo8);

    const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0);
    const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1);
    const float16x8_t vey2 = vfmsq_f16(vemo2, vy2, vepo2);
    const float16x8_t vey3 = vfmsq_f16(vemo3, vy3, vepo3);
    const float16x8_t vey4 = vfmsq_f16(vemo4, vy4, vepo4);
    const float16x8_t vey5 = vfmsq_f16(vemo5, vy5, vepo5);
    const float16x8_t vey6 = vfmsq_f16(vemo6, vy6, vepo6);
    const float16x8_t vey7 = vfmsq_f16(vemo7, vy7, vepo7);
    const float16x8_t vey8 = vfmsq_f16(vemo8, vy8, vepo8);

    vy0 = vfmaq_f16(vy0, vey0, vrepo0);
    vy1 = vfmaq_f16(vy1, vey1, vrepo1);
    vy2 = vfmaq_f16(vy2, vey2, vrepo2);
    vy3 = vfmaq_f16(vy3, vey3, vrepo3);
    vy4 = vfmaq_f16(vy4, vey4, vrepo4);
    vy5 = vfmaq_f16(vy5, vey5, vrepo5);
    vy6 = vfmaq_f16(vy6, vey6, vrepo6);
    vy7 = vfmaq_f16(vy7, vey7, vrepo7);
    vy8 = vfmaq_f16(vy8, vey8, vrepo8);

    vy0 = vbslq_f16(vsign_mask, vx0, vy0);
    vy1 = vbslq_f16(vsign_mask, vx1, vy1);
    vy2 = vbslq_f16(vsign_mask, vx2, vy2);
    vy3 = vbslq_f16(vsign_mask, vx3, vy3);
    vy4 = vbslq_f16(vsign_mask, vx4, vy4);
    vy5 = vbslq_f16(vsign_mask, vx5, vy5);
    vy6 = vbslq_f16(vsign_mask, vx6, vy6);
    vy7 = vbslq_f16(vsign_mask, vx7, vy7);
    vy8 = vbslq_f16(vsign_mask, vx8, vy8);

    vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy8)); o += 8;
  }
  for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
  if (n != 0) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    float16x4_t vy_lo = vget_low_f16(vy);
    if (n & 4 * sizeof(uint16_t)) {
      vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
      vy_lo = vget_high_f16(vy);
    }
    if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
      vy_lo = vext_f16(vy_lo, vy_lo, 2);
    }
    if (n & 1 * sizeof(uint16_t)) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
    }
  }
}
13,027
43.162712
95
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x8.c
// Auto-generated file. Do not edit!
//   Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>


void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x8(
    size_t n,
    const void* input,
    void* output,
    const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
  const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
  const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));

  const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
  if (n != 0) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    float16x4_t vy_lo = vget_low_f16(vy);
    if (n & 4 * sizeof(uint16_t)) {
      vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
      vy_lo = vget_high_f16(vy);
    }
    if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
      vy_lo = vext_f16(vy_lo, vy_lo, 2);
    }
    if (n & 1 * sizeof(uint16_t)) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
    }
  }
}
3,941
34.196429
93
c
XNNPACK
XNNPACK-master/src/f16-vtanh/gen/f16-vtanh-neonfp16arith-expm1minus-rr1-p3h2ts-recpeadj-x80.c
// Auto-generated file. Do not edit!
//   Template: src/f16-vtanh/neonfp16arith-expm1minus.c.in
//   Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/microparams.h>


void xnn_f16_vtanh_ukernel__neonfp16arith_expm1minus_rr1_p3h2ts_recpeadj_x80(
    size_t n,
    const void* input,
    void* output,
    const union xnn_f16_tanh_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const float16x8_t vsat_cutoff = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4482)));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x620F)));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBDC5)));
  const float16x8_t vln2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x398C)));
  const float16x8_t vc3 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBD5B)));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4008)));
  const float16x8_t vtwo = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0x4000)));
  const float16x8_t vminus_one = vreinterpretq_f16_u16(vmovq_n_u16(UINT16_C(0xBC00)));

  const uint16x8_t vsign_mask = vmovq_n_u16(UINT16_C(0x8000));

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; n >= 10 * sizeof(float16x8_t); n -= 10 * sizeof(float16x8_t)) {
    const float16x8_t vx0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx2 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx3 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx4 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx5 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx6 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx7 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx8 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    const float16x8_t vx9 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz0 = vabsq_f16(vx0);
    float16x8_t vz1 = vabsq_f16(vx1);
    float16x8_t vz2 = vabsq_f16(vx2);
    float16x8_t vz3 = vabsq_f16(vx3);
    float16x8_t vz4 = vabsq_f16(vx4);
    float16x8_t vz5 = vabsq_f16(vx5);
    float16x8_t vz6 = vabsq_f16(vx6);
    float16x8_t vz7 = vabsq_f16(vx7);
    float16x8_t vz8 = vabsq_f16(vx8);
    float16x8_t vz9 = vabsq_f16(vx9);

    vz0 = vminq_f16(vz0, vsat_cutoff);
    vz1 = vminq_f16(vz1, vsat_cutoff);
    vz2 = vminq_f16(vz2, vsat_cutoff);
    vz3 = vminq_f16(vz3, vsat_cutoff);
    vz4 = vminq_f16(vz4, vsat_cutoff);
    vz5 = vminq_f16(vz5, vsat_cutoff);
    vz6 = vminq_f16(vz6, vsat_cutoff);
    vz7 = vminq_f16(vz7, vsat_cutoff);
    vz8 = vminq_f16(vz8, vsat_cutoff);
    vz9 = vminq_f16(vz9, vsat_cutoff);

    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
    float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);
    float16x8_t vn7 = vfmaq_f16(vmagic_bias, vz7, vminus_log2e);
    float16x8_t vn8 = vfmaq_f16(vmagic_bias, vz8, vminus_log2e);
    float16x8_t vn9 = vfmaq_f16(vmagic_bias, vz9, vminus_log2e);

    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    vn0 = vsubq_f16(vn0, vmagic_bias);
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    vn1 = vsubq_f16(vn1, vmagic_bias);
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    vn2 = vsubq_f16(vn2, vmagic_bias);
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    vn3 = vsubq_f16(vn3, vmagic_bias);
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    vn4 = vsubq_f16(vn4, vmagic_bias);
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
    vn5 = vsubq_f16(vn5, vmagic_bias);
    const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
    vn6 = vsubq_f16(vn6, vmagic_bias);
    const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
    vn7 = vsubq_f16(vn7, vmagic_bias);
    const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
    vn8 = vsubq_f16(vn8, vmagic_bias);
    const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));
    vn9 = vsubq_f16(vn9, vmagic_bias);

    const float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2);
    const float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2);
    const float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2);
    const float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2);
    const float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2);
    const float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2);
    const float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2);
    const float16x8_t vt7 = vfmaq_f16(vz7, vn7, vln2);
    const float16x8_t vt8 = vfmaq_f16(vz8, vn8, vln2);
    const float16x8_t vt9 = vfmaq_f16(vz9, vn9, vln2);

    float16x8_t vp0 = vfmaq_f16(vc2, vc3, vt0);
    float16x8_t vp1 = vfmaq_f16(vc2, vc3, vt1);
    float16x8_t vp2 = vfmaq_f16(vc2, vc3, vt2);
    float16x8_t vp3 = vfmaq_f16(vc2, vc3, vt3);
    float16x8_t vp4 = vfmaq_f16(vc2, vc3, vt4);
    float16x8_t vp5 = vfmaq_f16(vc2, vc3, vt5);
    float16x8_t vp6 = vfmaq_f16(vc2, vc3, vt6);
    float16x8_t vp7 = vfmaq_f16(vc2, vc3, vt7);
    float16x8_t vp8 = vfmaq_f16(vc2, vc3, vt8);
    float16x8_t vp9 = vfmaq_f16(vc2, vc3, vt9);

    vp0 = vfmsq_f16(vtwo, vp0, vt0);
    vp1 = vfmsq_f16(vtwo, vp1, vt1);
    vp2 = vfmsq_f16(vtwo, vp2, vt2);
    vp3 = vfmsq_f16(vtwo, vp3, vt3);
    vp4 = vfmsq_f16(vtwo, vp4, vt4);
    vp5 = vfmsq_f16(vtwo, vp5, vt5);
    vp6 = vfmsq_f16(vtwo, vp6, vt6);
    vp7 = vfmsq_f16(vtwo, vp7, vt7);
    vp8 = vfmsq_f16(vtwo, vp8, vt8);
    vp9 = vfmsq_f16(vtwo, vp9, vt9);

    const float16x8_t vts0 = vmulq_f16(vt0, vs0);
    const float16x8_t vsmo0 = vaddq_f16(vs0, vminus_one);
    const float16x8_t vts1 = vmulq_f16(vt1, vs1);
    const float16x8_t vsmo1 = vaddq_f16(vs1, vminus_one);
    const float16x8_t vts2 = vmulq_f16(vt2, vs2);
    const float16x8_t vsmo2 = vaddq_f16(vs2, vminus_one);
    const float16x8_t vts3 = vmulq_f16(vt3, vs3);
    const float16x8_t vsmo3 = vaddq_f16(vs3, vminus_one);
    const float16x8_t vts4 = vmulq_f16(vt4, vs4);
    const float16x8_t vsmo4 = vaddq_f16(vs4, vminus_one);
    const float16x8_t vts5 = vmulq_f16(vt5, vs5);
    const float16x8_t vsmo5 = vaddq_f16(vs5, vminus_one);
    const float16x8_t vts6 = vmulq_f16(vt6, vs6);
    const float16x8_t vsmo6 = vaddq_f16(vs6, vminus_one);
    const float16x8_t vts7 = vmulq_f16(vt7, vs7);
    const float16x8_t vsmo7 = vaddq_f16(vs7, vminus_one);
    const float16x8_t vts8 = vmulq_f16(vt8, vs8);
    const float16x8_t vsmo8 = vaddq_f16(vs8, vminus_one);
    const float16x8_t vts9 = vmulq_f16(vt9, vs9);
    const float16x8_t vsmo9 = vaddq_f16(vs9, vminus_one);

    const float16x8_t vemo0 = vfmsq_f16(vsmo0, vp0, vts0);
    const float16x8_t vemo1 = vfmsq_f16(vsmo1, vp1, vts1);
    const float16x8_t vemo2 = vfmsq_f16(vsmo2, vp2, vts2);
    const float16x8_t vemo3 = vfmsq_f16(vsmo3, vp3, vts3);
    const float16x8_t vemo4 = vfmsq_f16(vsmo4, vp4, vts4);
    const float16x8_t vemo5 = vfmsq_f16(vsmo5, vp5, vts5);
    const float16x8_t vemo6 = vfmsq_f16(vsmo6, vp6, vts6);
    const float16x8_t vemo7 = vfmsq_f16(vsmo7, vp7, vts7);
    const float16x8_t vemo8 = vfmsq_f16(vsmo8, vp8, vts8);
    const float16x8_t vemo9 = vfmsq_f16(vsmo9, vp9, vts9);

    const float16x8_t vepo0 = vaddq_f16(vemo0, vtwo);
    const float16x8_t vepo1 = vaddq_f16(vemo1, vtwo);
    const float16x8_t vepo2 = vaddq_f16(vemo2, vtwo);
    const float16x8_t vepo3 = vaddq_f16(vemo3, vtwo);
    const float16x8_t vepo4 = vaddq_f16(vemo4, vtwo);
    const float16x8_t vepo5 = vaddq_f16(vemo5, vtwo);
    const float16x8_t vepo6 = vaddq_f16(vemo6, vtwo);
    const float16x8_t vepo7 = vaddq_f16(vemo7, vtwo);
    const float16x8_t vepo8 = vaddq_f16(vemo8, vtwo);
    const float16x8_t vepo9 = vaddq_f16(vemo9, vtwo);

    float16x8_t vrepo0 = vrecpeq_f16(vepo0);
    float16x8_t vrepo1 = vrecpeq_f16(vepo1);
    float16x8_t vrepo2 = vrecpeq_f16(vepo2);
    float16x8_t vrepo3 = vrecpeq_f16(vepo3);
    float16x8_t vrepo4 = vrecpeq_f16(vepo4);
    float16x8_t vrepo5 = vrecpeq_f16(vepo5);
    float16x8_t vrepo6 = vrecpeq_f16(vepo6);
    float16x8_t vrepo7 = vrecpeq_f16(vepo7);
    float16x8_t vrepo8 = vrecpeq_f16(vepo8);
    float16x8_t vrepo9 = vrecpeq_f16(vepo9);

    float16x8_t vy0 = vmulq_f16(vemo0, vrepo0);
    float16x8_t vy1 = vmulq_f16(vemo1, vrepo1);
    float16x8_t vy2 = vmulq_f16(vemo2, vrepo2);
    float16x8_t vy3 = vmulq_f16(vemo3, vrepo3);
    float16x8_t vy4 = vmulq_f16(vemo4, vrepo4);
    float16x8_t vy5 = vmulq_f16(vemo5, vrepo5);
    float16x8_t vy6 = vmulq_f16(vemo6, vrepo6);
    float16x8_t vy7 = vmulq_f16(vemo7, vrepo7);
    float16x8_t vy8 = vmulq_f16(vemo8, vrepo8);
    float16x8_t vy9 = vmulq_f16(vemo9, vrepo9);

    const float16x8_t vey0 = vfmsq_f16(vemo0, vy0, vepo0);
    const float16x8_t vey1 = vfmsq_f16(vemo1, vy1, vepo1);
    const float16x8_t vey2 = vfmsq_f16(vemo2, vy2, vepo2);
    const float16x8_t vey3 = vfmsq_f16(vemo3, vy3, vepo3);
    const float16x8_t vey4 = vfmsq_f16(vemo4, vy4, vepo4);
    const float16x8_t vey5 = vfmsq_f16(vemo5, vy5, vepo5);
    const float16x8_t vey6 = vfmsq_f16(vemo6, vy6, vepo6);
    const float16x8_t vey7 = vfmsq_f16(vemo7, vy7, vepo7);
    const float16x8_t vey8 = vfmsq_f16(vemo8, vy8, vepo8);
    const float16x8_t vey9 = vfmsq_f16(vemo9, vy9, vepo9);

    vy0 = vfmaq_f16(vy0, vey0, vrepo0);
    vy1 = vfmaq_f16(vy1, vey1, vrepo1);
    vy2 = vfmaq_f16(vy2, vey2, vrepo2);
    vy3 = vfmaq_f16(vy3, vey3, vrepo3);
    vy4 = vfmaq_f16(vy4, vey4, vrepo4);
    vy5 = vfmaq_f16(vy5, vey5, vrepo5);
    vy6 = vfmaq_f16(vy6, vey6, vrepo6);
    vy7 = vfmaq_f16(vy7, vey7, vrepo7);
    vy8 = vfmaq_f16(vy8, vey8, vrepo8);
    vy9 = vfmaq_f16(vy9, vey9, vrepo9);

    vy0 = vbslq_f16(vsign_mask, vx0, vy0);
    vy1 = vbslq_f16(vsign_mask, vx1, vy1);
    vy2 = vbslq_f16(vsign_mask, vx2, vy2);
    vy3 = vbslq_f16(vsign_mask, vx3, vy3);
    vy4 = vbslq_f16(vsign_mask, vx4, vy4);
    vy5 = vbslq_f16(vsign_mask, vx5, vy5);
    vy6 = vbslq_f16(vsign_mask, vx6, vy6);
    vy7 = vbslq_f16(vsign_mask, vx7, vy7);
    vy8 = vbslq_f16(vsign_mask, vx8, vy8);
    vy9 = vbslq_f16(vsign_mask, vx9, vy9);

    vst1q_u16(o, vreinterpretq_u16_f16(vy0)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy1)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy2)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy3)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy4)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy5)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy6)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy7)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy8)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vy9)); o += 8;
  }
  for (; n >= 1 * sizeof(float16x8_t); n -= 1 * sizeof(float16x8_t)) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    vst1q_u16(o, vreinterpretq_u16_f16(vy)); o += 8;
  }
  if (n != 0) {
    const float16x8_t vx = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    float16x8_t vz = vabsq_f16(vx);
    vz = vminq_f16(vz, vsat_cutoff);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    const float16x8_t vt = vfmaq_f16(vz, vn, vln2);

    float16x8_t vp = vfmaq_f16(vc2, vc3, vt);
    vp = vfmsq_f16(vtwo, vp, vt);

    const float16x8_t vts = vmulq_f16(vt, vs);
    const float16x8_t vsmo = vaddq_f16(vs, vminus_one);
    const float16x8_t vemo = vfmsq_f16(vsmo, vp, vts);
    const float16x8_t vepo = vaddq_f16(vemo, vtwo);

    float16x8_t vrepo = vrecpeq_f16(vepo);
    float16x8_t vy = vmulq_f16(vemo, vrepo);
    const float16x8_t vey = vfmsq_f16(vemo, vy, vepo);
    vy = vfmaq_f16(vy, vey, vrepo);

    vy = vbslq_f16(vsign_mask, vx, vy);

    float16x4_t vy_lo = vget_low_f16(vy);
    if (n & 4 * sizeof(uint16_t)) {
      vst1_u16(o, vreinterpret_u16_f16(vy_lo)); o += 4;
      vy_lo = vget_high_f16(vy);
    }
    if (n & 2 * sizeof(uint16_t)) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vy_lo), 0); o += 2;
      vy_lo = vext_f16(vy_lo, vy_lo, 2);
    }
    if (n & 1 * sizeof(uint16_t)) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vy_lo), 0);
    }
  }
}
14,029
43.681529
95
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vabs-neonfp16arith-x16.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f16_vabs_ukernel__neonfp16arith_x16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    float16x8_t vacc0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    float16x8_t vacc1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    vacc0 = vabsq_f16(vacc0);
    vacc1 = vabsq_f16(vacc1);

    vst1q_u16(o, vreinterpretq_u16_f16(vacc0)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vacc1)); o += 8;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    vacc = vabsq_f16(vacc);
    vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
    vacc = vabsq_f16(vacc);

    float16x4_t vacc_lo = vget_low_f16(vacc);
    if (batch & (4 * sizeof(uint16_t))) {
      vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
      vacc_lo = vget_high_f16(vacc);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
      vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
    }
  }
}
1,985
30.52381
86
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vabs-neonfp16arith-x8.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f16_vabs_ukernel__neonfp16arith_x8(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    vacc = vabsq_f16(vacc);
    vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
    vacc = vabsq_f16(vacc);

    float16x4_t vacc_lo = vget_low_f16(vacc);
    if (batch & (4 * sizeof(uint16_t))) {
      vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
      vacc_lo = vget_high_f16(vacc);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
      vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
    }
  }
}
1,593
29.075472
86
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vabs-sse2-x16.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>


void xnn_f16_vabs_ukernel__sse2_x16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  const __m128i vnonsign_mask = _mm_load_si128((const __m128i*) params->sse.nonsign_mask);
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    __m128i vacc0 = _mm_loadu_si128((const __m128i*) i);
    __m128i vacc1 = _mm_loadu_si128((const __m128i*) (i + 8));
    i += 16;

    vacc0 = _mm_and_si128(vacc0, vnonsign_mask);
    vacc1 = _mm_and_si128(vacc1, vnonsign_mask);

    _mm_storeu_si128((__m128i*) o, vacc0);
    _mm_storeu_si128((__m128i*) (o + 8), vacc1);
    o += 16;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    i += 8;
    vacc = _mm_and_si128(vacc, vnonsign_mask);
    _mm_storeu_si128((__m128i*) o, vacc);
    o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    vacc = _mm_and_si128(vacc, vnonsign_mask);
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vacc);
      o += 4;
      vacc = _mm_unpackhi_epi64(vacc, vacc);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vacc));
      o += 2;
      vacc = _mm_srli_epi64(vacc, 32);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}
2,127
28.971831
90
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vabs-sse2-x8.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>


void xnn_f16_vabs_ukernel__sse2_x8(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_abs_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  const __m128i vnonsign_mask = _mm_load_si128((const __m128i*) params->sse.nonsign_mask);
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    i += 8;
    vacc = _mm_and_si128(vacc, vnonsign_mask);
    _mm_storeu_si128((__m128i*) o, vacc);
    o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    vacc = _mm_and_si128(vacc, vnonsign_mask);
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vacc);
      o += 4;
      vacc = _mm_unpackhi_epi64(vacc, vacc);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vacc));
      o += 2;
      vacc = _mm_srli_epi64(vacc, 32);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}
1,709
27.983051
90
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vneg-neonfp16arith-x16.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f16_vneg_ukernel__neonfp16arith_x16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    float16x8_t vacc0 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    float16x8_t vacc1 = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;

    vacc0 = vnegq_f16(vacc0);
    vacc1 = vnegq_f16(vacc1);

    vst1q_u16(o, vreinterpretq_u16_f16(vacc0)); o += 8;
    vst1q_u16(o, vreinterpretq_u16_f16(vacc1)); o += 8;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    vacc = vnegq_f16(vacc);
    vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
    vacc = vnegq_f16(vacc);

    float16x4_t vacc_lo = vget_low_f16(vacc);
    if (batch & (4 * sizeof(uint16_t))) {
      vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
      vacc_lo = vget_high_f16(vacc);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
      vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
    }
  }
}
1,985
30.52381
86
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vneg-neonfp16arith-x8.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/neonfp16arith.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f16_vneg_ukernel__neonfp16arith_x8(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i)); i += 8;
    vacc = vnegq_f16(vacc);
    vst1q_u16(o, vreinterpretq_u16_f16(vacc)); o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    float16x8_t vacc = vreinterpretq_f16_u16(vld1q_u16(i));
    vacc = vnegq_f16(vacc);

    float16x4_t vacc_lo = vget_low_f16(vacc);
    if (batch & (4 * sizeof(uint16_t))) {
      vst1_u16(o, vreinterpret_u16_f16(vacc_lo)); o += 4;
      vacc_lo = vget_high_f16(vacc);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vacc_lo), 0); o += 2;
      vacc_lo = vext_f16(vacc_lo, vacc_lo, 2);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      vst1_lane_u16(o, vreinterpret_u16_f16(vacc_lo), 0);
    }
  }
}
1,593
29.075472
86
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vneg-sse2-x16.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>


void xnn_f16_vneg_ukernel__sse2_x16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse.sign_mask);
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    __m128i vacc0 = _mm_loadu_si128((const __m128i*) i);
    __m128i vacc1 = _mm_loadu_si128((const __m128i*) (i + 8));
    i += 16;

    vacc0 = _mm_xor_si128(vacc0, vsign_mask);
    vacc1 = _mm_xor_si128(vacc1, vsign_mask);

    _mm_storeu_si128((__m128i*) o, vacc0);
    _mm_storeu_si128((__m128i*) (o + 8), vacc1);
    o += 16;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    i += 8;
    vacc = _mm_xor_si128(vacc, vsign_mask);
    _mm_storeu_si128((__m128i*) o, vacc);
    o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    vacc = _mm_xor_si128(vacc, vsign_mask);
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vacc);
      o += 4;
      vacc = _mm_unpackhi_epi64(vacc, vacc);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vacc));
      o += 2;
      vacc = _mm_srli_epi64(vacc, 32);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}
2,109
28.71831
86
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vneg-sse2-x8.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>


void xnn_f16_vneg_ukernel__sse2_x8(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_neg_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse.sign_mask);
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    i += 8;
    vacc = _mm_xor_si128(vacc, vsign_mask);
    _mm_storeu_si128((__m128i*) o, vacc);
    o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    vacc = _mm_xor_si128(vacc, vsign_mask);
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vacc);
      o += 4;
      vacc = _mm_unpackhi_epi64(vacc, vacc);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vacc));
      o += 2;
      vacc = _mm_srli_epi64(vacc, 32);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}
1,697
27.779661
86
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vsqr-f16c-x16.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


void xnn_f16_vsqr_ukernel__f16c_x16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    __m256 vacc0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    __m256 vacc1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
    i += 16;

    vacc0 = _mm256_mul_ps(vacc0, vacc0);
    vacc1 = _mm256_mul_ps(vacc1, vacc1);

    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc0, _MM_FROUND_TO_NEAREST_INT));
    _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc1, _MM_FROUND_TO_NEAREST_INT));
    o += 16;
  }
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;
    vacc = _mm256_mul_ps(vacc, vacc);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    vacc = _mm256_mul_ps(vacc, vacc);

    __m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vh);
      o += 4;
      vh = _mm_unpackhi_epi64(vh, vh);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(o, vh);
      o += 2;
      vh = _mm_srli_epi64(vh, 32);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vh, 0);
    }
  }
}
2,203
30.485714
92
c
XNNPACK
XNNPACK-master/src/f16-vunary/gen/f16-vsqr-f16c-x8.c
// Auto-generated file. Do not edit!
// Template: src/f16-vunary/f16c.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


void xnn_f16_vsqr_ukernel__f16c_x8(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;
    vacc = _mm256_mul_ps(vacc, vacc);
    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT));
    o += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    __m256 vacc = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    vacc = _mm256_mul_ps(vacc, vacc);

    __m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_TO_NEAREST_INT);
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vh);
      o += 4;
      vh = _mm_unpackhi_epi64(vh, vh);
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(o, vh);
      o += 2;
      vh = _mm_srli_epi64(vh, 32);
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vh, 0);
    }
  }
}
1,681
28
90
c