| repo (stringlengths 1-152, ⌀) | file (stringlengths 14-221) | code (stringlengths 501-25k) | file_length (int64 501-25k) | avg_line_length (float64 20-99.5) | max_line_length (int64 21-134) | extension_type (stringclasses, 2 values) |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
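// Main loop: 24 floats (three AVX2 vectors) per iteration, computing sigmoid(x) = 1 / (1 + exp(-x)).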
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
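// z = -|x|: OR-ing in the sign mask forces the sign bit, so only the non-positive half, sigmoid(z) = exp(z) / (1 + exp(z)), has to be evaluated.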
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
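// n = round(z / ln2) via the magic-bias trick: the addend leaves the rounded value in the low mantissa bits of vn.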
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
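// s = 2^n, reconstructed by shifting the biased low bits of vn into the floating-point exponent field.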
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
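// t = z - n * ln2: reduced argument, using a single ln2 constant (the "rr1" in the kernel name).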
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
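// Degree-5 polynomial p(t) ~= (exp(t) - 1) / t, evaluated with Horner's scheme.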
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
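// e = s * (1 + t * p(t)) ~= exp(z); d = e + 1 is the sigmoid denominator.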
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
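// r ~= 1/d: hardware reciprocal estimate refined with one Newton-Raphson step (the "nr1fma" in the kernel name).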
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
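// Flush the result to zero where z is below the denormal cutoff (exp(z) would underflow).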
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
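// Reconstruct sigmoid(x): keep f for negative x, 1 - f otherwise; blendv selects on the sign bit of x.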
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
output += 24;
}
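// Tail loop: one vector of 8 floats at a time, same computation as above.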
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
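// Remainder of 1-7 floats: masked load, then partial stores of 4/2/1 lanes.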
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 7,227 | 35.321608 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
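// Same algorithm as the x24 variant above, unrolled to 32 floats per iteration.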
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 8,234 | 36.775229 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
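// Same algorithm as the x24 variant above, unrolled to 40 floats per iteration.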
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 9,241 | 37.995781 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
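// Same algorithm as the x24 variant above, unrolled to 48 floats per iteration.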
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 10,248 | 39.035156 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
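// Same algorithm as the x24 variant above, unrolled to 56 floats per iteration.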
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 11,255 | 39.930909 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
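// Same algorithm as the x24 variant above, unrolled to 64 floats per iteration.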
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 12,262 | 40.710884 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x72.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
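// Same algorithm as the x24 variant above, unrolled to 72 floats per iteration.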
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 ve8 = _mm256_fmadd_ps(vt8, vp8, vs8);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
__m256 vr8 = _mm256_rcp_ps(vd8);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
__m256 vf8 = _mm256_mul_ps(ve8, vr8);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 13,269 | 41.396166 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
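  // Each iteration evaluates sigmoid(x) as e / (e + 1) with e = exp(z), z = -|x|:
  // exp(z) is computed via the range reduction z = n*ln2 + t, a degree-5 polynomial
  // in t, and reconstruction with s = 2**n; the reciprocal of (e + 1) comes from
  // RCPPS refined by one Newton-Raphson step, and the sign of x selects f or 1 - f.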
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
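    // z = -|x|: set the sign bit so that exp(z) <= 1 and cannot overflow.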
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
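    // n := round(z / ln2), computed with the "magic bias" rounding trick; the
    // rounded integer ends up in the low bits of vn.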
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
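    // s := 2**n, formed by shifting the integer bits of vn into the float exponent field.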
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
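    // t := z - n*ln2, the reduced argument of the exp approximation.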
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
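    // p(t) := c1 + c2*t + c3*t^2 + c4*t^3 + c5*t^4, evaluated with Horner's scheme.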
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
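    // e := s + (s*t)*p(t) ~= exp(z).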
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
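    // r ~= 1/d via RCPPS plus one Newton-Raphson refinement: r' = r + r*(1 - r*d).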
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
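    // Flush the result to zero for inputs z below the denormal cutoff, where exp(z) underflows.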
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
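    // Reconstruct the sign: sigmoid(x) = 1 - sigmoid(-x), so take 1 - f for x >= 0
    // (sign bit clear) and keep f for negative x.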
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 4,090 | 33.378151 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 ve8 = _mm256_fmadd_ps(vt8, vp8, vs8);
const __m256 ve9 = _mm256_fmadd_ps(vt9, vp9, vs9);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
const __m256 vd9 = _mm256_add_ps(ve9, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
__m256 vr8 = _mm256_rcp_ps(vd8);
__m256 vr9 = _mm256_rcp_ps(vd9);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
vr9 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr9, vd9, vone), vr9, vr9);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
__m256 vf8 = _mm256_mul_ps(ve8, vr8);
__m256 vf9 = _mm256_mul_ps(ve9, vr9);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vz9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
vf9 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf9), vf9, vx9);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 14,276 | 42.003012 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
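    // Approximate 1/d with RCPPS followed by two Newton-Raphson refinements
    // (the "nr2fma" variant; the nr1fma kernels above use a single refinement).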
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 6,494 | 34.298913 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 7,572 | 36.122549 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 8,650 | 37.620536 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
file_length: 9,728 | avg_line_length: 38.872951 | max_line_length: 120 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x48.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
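    // z = x | sign_mask = -|x|, so the exponential below is only evaluated for non-positive arguments.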
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
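    // n = round(z * log2(e)), obtained with the magic-bias rounding trick.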
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
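    // s = 2**n, built by shifting the low bits of n into the float exponent field.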
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
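    // t = z - n * ln(2), the reduced argument for the polynomial.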
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
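    // p(t) approximates (exp(t) - 1) / t, completing the degree-5 polynomial exp(t) ~= 1 + t * p(t).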
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
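    // e = s + (s * t) * p = s * (1 + t * p) ~= exp(z).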
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
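    // d = e + 1; the sigmoid of z is e / d, computed below with rcp and Newton-Raphson refinement.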
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
file_length: 10,806 | avg_line_length: 39.935606 | max_line_length: 120 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x56.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
file_length: 11,884 | avg_line_length: 40.848592 | max_line_length: 120 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x64.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
file_length: 12,962 | avg_line_length: 41.641447 | max_line_length: 120 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x72.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 ve8 = _mm256_fmadd_ps(vt8, vp8, vs8);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
__m256 vr8 = _mm256_rcp_ps(vd8);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
__m256 vf8 = _mm256_mul_ps(ve8, vr8);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
file_length: 14,040 | avg_line_length: 42.33642 | max_line_length: 120 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
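
/*
 * The kernels above all share the same per-element recipe; the scalar sketch
 * below (not part of XNNPACK, added only as an illustration) spells it out
 * without the vectorization.  It assumes the caller passes the same constants
 * that the vector kernels load from params->avx2_rr1_p5; the function name and
 * signature are hypothetical.  Unlike the kernels, it divides directly instead
 * of using rcp plus Newton-Raphson refinement.
 */
#include <math.h>
#include <stdint.h>
#include <string.h>

static float sigmoid_rr1_p5_scalar(
    float x,
    float magic_bias, float log2e, float minus_ln2,
    float c5, float c4, float c3, float c2, float c1,
    float denorm_cutoff)
{
  const float z = -fabsf(x);             // vz = vx | vsign_mask
  float n = z * log2e + magic_bias;      // vn = fma(vz, vlog2e, vmagic_bias)
  uint32_t n_bits;
  memcpy(&n_bits, &n, sizeof(n_bits));   // reinterpret the float bits of n
  const uint32_t s_bits = n_bits << 23;  // vs: shift the low bits of n into the exponent field
  float s;
  memcpy(&s, &s_bits, sizeof(s));
  n -= magic_bias;                       // recover the integer-valued n
  float t = n * minus_ln2 + z;           // vt = fma(vn, vminus_ln2, vz)
  float p = c5 * t + c4;                 // degree-5 polynomial: exp(t) ~= 1 + t * p(t)
  p = p * t + c3;
  p = p * t + c2;
  p = p * t + c1;
  t *= s;
  const float e = t * p + s;             // ve = exp(z) = s * (1 + t * p)
  const float d = e + 1.0f;              // vd = ve + vone
  float f = e / d;                       // the kernels compute this with rcp + Newton-Raphson
  if (z < denorm_cutoff) {
    f = 0.0f;                            // exp(z) underflows; the sigmoid rounds to 0
  }
  return signbit(x) ? f : 1.0f - f;      // mirror the result for positive inputs
}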
file_length: 4,222 | avg_line_length: 33.900826 | max_line_length: 120 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr2fma-x80.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 ve8 = _mm256_fmadd_ps(vt8, vp8, vs8);
const __m256 ve9 = _mm256_fmadd_ps(vt9, vp9, vs9);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
const __m256 vd9 = _mm256_add_ps(ve9, vone);
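    // Approximate 1 / vd with _mm256_rcp_ps (about 12 bits of precision), then
    // sharpen the estimate with two Newton-Raphson steps: r <- r + r * (1 - r * d).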
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
__m256 vr8 = _mm256_rcp_ps(vd8);
__m256 vr9 = _mm256_rcp_ps(vd9);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
vr9 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr9, vd9, vone), vr9, vr9);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
vr8 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr8, vd8, vone), vr8, vr8);
vr9 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr9, vd9, vone), vr9, vr9);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
__m256 vf8 = _mm256_mul_ps(ve8, vr8);
__m256 vf9 = _mm256_mul_ps(ve9, vr9);
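    // vf = e / (1 + e) = sigmoid(z) with z = -|x|. Flush lanes where z is so
    // negative that exp(z) underflows (z < denorm_cutoff) to exactly 0.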
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vz9, vdenorm_cutoff, _CMP_LT_OS), vf9);
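    // So far vf = sigmoid(-|x|). For lanes where the sign bit of x is clear
    // (x >= 0), take 1 - vf instead, using sigmoid(x) = 1 - sigmoid(-x).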
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
vf9 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf9), vf9, vx9);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
output += 80;
}
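  // Process any remaining full vectors of 8 floats, one vector at a time.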
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
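  // Handle the final 1-7 floats with a masked load and partial stores.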
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 15,118 | 42.950581 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
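    // Table lookup: _mm512_permutexvar_ps uses only the low 4 bits of each lane
    // of vn (pinned by the magic-bias trick) to select one of the 16 entries of
    // vtable, which hold 2^(j/16) for this lut16 variant.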
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
const __m512 vl4 = _mm512_permutexvar_ps(_mm512_castps_si512(vn4), vtable);
const __m512 vl5 = _mm512_permutexvar_ps(_mm512_castps_si512(vn5), vtable);
const __m512 vl6 = _mm512_permutexvar_ps(_mm512_castps_si512(vn6), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
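    // Degree-3 polynomial on the reduced argument t, then fold in the table
    // value: p = t + c2*t^2 + c3*t^3, followed by l + l*p ~= l * exp(t).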
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc3, vc2);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc3, vc2);
__m512 vp6 = _mm512_fmadd_ps(vt6, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp4 = _mm512_mul_ps(vp4, vt4);
vp5 = _mm512_mul_ps(vp5, vt5);
vp6 = _mm512_mul_ps(vp6, vt6);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vt4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vt5);
vp6 = _mm512_fmadd_ps(vt6, vp6, vt6);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vl4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vl5, vp5, vl5);
vp6 = _mm512_fmadd_ps(vl6, vp6, vl6);
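    // _mm512_scalef_ps multiplies by 2^floor(vn), completing the reconstruction
    // of exp(z) from the table value and the polynomial.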
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
__m512 vf6 = _mm512_div_ps(ve6, vd6);
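    // vf = e / (1 + e) = sigmoid(z) with z = -|x|. Where the sign bit of x is
    // clear (testn mask set, i.e. x >= 0), replace vf with 1 - vf.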
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 9,533 | 42.140271 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
const __m512 vz7 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx7), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
const __m512 vl4 = _mm512_permutexvar_ps(_mm512_castps_si512(vn4), vtable);
const __m512 vl5 = _mm512_permutexvar_ps(_mm512_castps_si512(vn5), vtable);
const __m512 vl6 = _mm512_permutexvar_ps(_mm512_castps_si512(vn6), vtable);
const __m512 vl7 = _mm512_permutexvar_ps(_mm512_castps_si512(vn7), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vz7);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc3, vc2);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc3, vc2);
__m512 vp6 = _mm512_fmadd_ps(vt6, vc3, vc2);
__m512 vp7 = _mm512_fmadd_ps(vt7, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp4 = _mm512_mul_ps(vp4, vt4);
vp5 = _mm512_mul_ps(vp5, vt5);
vp6 = _mm512_mul_ps(vp6, vt6);
vp7 = _mm512_mul_ps(vp7, vt7);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vt4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vt5);
vp6 = _mm512_fmadd_ps(vt6, vp6, vt6);
vp7 = _mm512_fmadd_ps(vt7, vp7, vt7);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vl4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vl5, vp5, vl5);
vp6 = _mm512_fmadd_ps(vl6, vp6, vl6);
vp7 = _mm512_fmadd_ps(vl7, vp7, vl7);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 ve7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
const __m512 vd7 = _mm512_add_ps(ve7, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
__m512 vf6 = _mm512_div_ps(ve6, vd6);
__m512 vf7 = _mm512_div_ps(ve7, vd7);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
vf7 = _mm512_mask_sub_ps(vf7, _mm512_testn_epi32_mask(_mm512_castps_si512(vx7), vsign_mask), vone, vf7);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 10,384 | 43.004237 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 3,473 | 34.44898 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 5,283 | 35.191781 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 6,132 | 37.093168 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 6,981 | 38.670455 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
const __m512 vl4 = _mm512_permutexvar_ps(_mm512_castps_si512(vn4), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp4 = _mm512_mul_ps(vp4, vt4);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vt4);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vl4, vp4, vl4);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 7,830 | 40 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x96.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
const __m512 vl4 = _mm512_permutexvar_ps(_mm512_castps_si512(vn4), vtable);
const __m512 vl5 = _mm512_permutexvar_ps(_mm512_castps_si512(vn5), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc3, vc2);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp4 = _mm512_mul_ps(vp4, vt4);
vp5 = _mm512_mul_ps(vp5, vt5);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vt4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vt5);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vl4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vl5, vp5, vl5);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
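
// A standalone sketch of the masked-tail pattern shared by every kernel in this file set,
// assuming AVX-512F is enabled at compile time and a remainder of 1..15 floats; the helper
// name is illustrative and not part of XNNPACK.
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

static void copy_f32_tail(const float* input, float* output, size_t count /* 1..15 */) {
  // Low `count` bits set -> only the first `count` lanes participate.
  const __mmask16 mask = _cvtu32_mask16((uint16_t) ((UINT32_C(1) << count) - UINT32_C(1)));
  const __m512 v = _mm512_maskz_loadu_ps(mask, input);  // unselected lanes load as zero
  _mm512_mask_storeu_ps(output, mask, v);               // unselected lanes stay untouched
}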
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x112.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
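  // Same rr1-lut16-p3 reduction as the -div- kernels in this file set; the only difference
  // is that 1/(1 + e^z) comes from _mm512_rcp14_ps (roughly 14 correct bits) refined by a
  // single Newton-Raphson step (the fnmadd/fmadd pair applied to vr) instead of a division.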
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
const __m512 vl4 = _mm512_permutexvar_ps(_mm512_castps_si512(vn4), vtable);
const __m512 vl5 = _mm512_permutexvar_ps(_mm512_castps_si512(vn5), vtable);
const __m512 vl6 = _mm512_permutexvar_ps(_mm512_castps_si512(vn6), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc3, vc2);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc3, vc2);
__m512 vp6 = _mm512_fmadd_ps(vt6, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp4 = _mm512_mul_ps(vp4, vt4);
vp5 = _mm512_mul_ps(vp5, vt5);
vp6 = _mm512_mul_ps(vp6, vt6);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vt4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vt5);
vp6 = _mm512_fmadd_ps(vt6, vp6, vt6);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vl4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vl5, vp5, vl5);
vp6 = _mm512_fmadd_ps(vl6, vp6, vl6);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
__m512 vr6 = _mm512_rcp14_ps(vd6);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
__m512 vf6 = _mm512_mul_ps(ve6, vr6);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
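
// A scalar sketch of the reciprocal refinement used by the nr1fma kernels, assuming r0 is
// an initial estimate of 1/d such as the one _mm512_rcp14_ps provides (relative error on
// the order of 2^-14). One Newton-Raphson step roughly doubles the number of correct bits:
//   r1 = r0 + r0*(1 - r0*d)
// which corresponds to the _mm512_fnmadd_ps/_mm512_fmadd_ps pair applied to vr and vd above.
static inline float nr_refine_reciprocal(float r0, float d) {
  const float err = 1.0f - r0 * d;  // residual of the current estimate
  return r0 + r0 * err;             // refined estimate of 1/d
}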
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x128.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
const __m512 vz7 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx7), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
const __m512 vl4 = _mm512_permutexvar_ps(_mm512_castps_si512(vn4), vtable);
const __m512 vl5 = _mm512_permutexvar_ps(_mm512_castps_si512(vn5), vtable);
const __m512 vl6 = _mm512_permutexvar_ps(_mm512_castps_si512(vn6), vtable);
const __m512 vl7 = _mm512_permutexvar_ps(_mm512_castps_si512(vn7), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vz7);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc3, vc2);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc3, vc2);
__m512 vp6 = _mm512_fmadd_ps(vt6, vc3, vc2);
__m512 vp7 = _mm512_fmadd_ps(vt7, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp4 = _mm512_mul_ps(vp4, vt4);
vp5 = _mm512_mul_ps(vp5, vt5);
vp6 = _mm512_mul_ps(vp6, vt6);
vp7 = _mm512_mul_ps(vp7, vt7);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vt4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vt5);
vp6 = _mm512_fmadd_ps(vt6, vp6, vt6);
vp7 = _mm512_fmadd_ps(vt7, vp7, vt7);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vl4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vl5, vp5, vl5);
vp6 = _mm512_fmadd_ps(vl6, vp6, vl6);
vp7 = _mm512_fmadd_ps(vl7, vp7, vl7);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 ve7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
const __m512 vd7 = _mm512_add_ps(ve7, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
__m512 vr6 = _mm512_rcp14_ps(vd6);
__m512 vr7 = _mm512_rcp14_ps(vd7);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
__m512 vf6 = _mm512_mul_ps(ve6, vr6);
__m512 vf7 = _mm512_mul_ps(ve7, vr7);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
vf7 = _mm512_mask_sub_ps(vf7, _mm512_testn_epi32_mask(_mm512_castps_si512(vx7), vsign_mask), vone, vf7);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
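  // In the loop below, the mul/fmadd chain on vp evaluates
  //   vl * (1 + vt + c2*vt^2 + c3*vt^3) ~= vl * exp(vt)
  // and _mm512_scalef_ps then applies the remaining power-of-two factor 2^floor(vn).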
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
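
// A hypothetical call site for the ukernel above, shown only to illustrate the calling
// convention visible in its signature: batch is given in bytes and must be a non-zero
// multiple of sizeof(float), and params must point to an xnn_f32_sigmoid_params union that
// was initialized beforehand (XNNPACK fills it through its parameter-init routines, which
// are not shown here). The wrapper name is illustrative and not part of XNNPACK.
static void sigmoid_f32_example(const float* input, float* output, size_t count,
                                const union xnn_f32_sigmoid_params* params) {
  // count > 0 floats; the kernel itself handles any remainder of fewer than 16 elements.
  xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x16(
      count * sizeof(float), input, output, params);
}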
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x32.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x48.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x64.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x80.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
const __m512 vl4 = _mm512_permutexvar_ps(_mm512_castps_si512(vn4), vtable);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp4 = _mm512_mul_ps(vp4, vt4);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vt4);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vl4, vp4, vl4);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
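    // batch now counts elements (after the shift above); the mask enables exactly that
    // many low lanes for the load, compute, and store.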
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 8,594 | 39.928571 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-lut16-p3-perm-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_lut16_p3.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vtable = _mm512_load_ps(params->avx512_rr1_lut16_p3.table);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_lut16_p3.one);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
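    // z = -|x|: setting the sign bit keeps the exponential argument non-positive, so
    // e^z never overflows; the original sign of x is handled at the end.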
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutexvar_ps(_mm512_castps_si512(vn0), vtable);
const __m512 vl1 = _mm512_permutexvar_ps(_mm512_castps_si512(vn1), vtable);
const __m512 vl2 = _mm512_permutexvar_ps(_mm512_castps_si512(vn2), vtable);
const __m512 vl3 = _mm512_permutexvar_ps(_mm512_castps_si512(vn3), vtable);
const __m512 vl4 = _mm512_permutexvar_ps(_mm512_castps_si512(vn4), vtable);
const __m512 vl5 = _mm512_permutexvar_ps(_mm512_castps_si512(vn5), vtable);
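    // While the magic bias is still applied, the low 4 bits of each lane of n index the
    // 16-entry table of 2^(i/16) values via VPERMPS.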
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
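    // t := n*(-ln2) + z, the reduced argument left for the short polynomial below.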
__m512 vp0 = _mm512_fmadd_ps(vt0, vc3, vc2);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc3, vc2);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc3, vc2);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc3, vc2);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vp1 = _mm512_mul_ps(vp1, vt1);
vp2 = _mm512_mul_ps(vp2, vt2);
vp3 = _mm512_mul_ps(vp3, vt3);
vp4 = _mm512_mul_ps(vp4, vt4);
vp5 = _mm512_mul_ps(vp5, vt5);
vp0 = _mm512_fmadd_ps(vt0, vp0, vt0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vt1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vt2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vt3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vt4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vt5);
vp0 = _mm512_fmadd_ps(vl0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vl1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vl2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vl3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vl4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vl5, vp5, vl5);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
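    // One Newton-Raphson step below refines each RCP14 estimate (about 14 correct bits)
    // to nearly full single precision; the *-div kernel variants use an exact division instead.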
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutexvar_ps(_mm512_castps_si512(vn), vtable);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vt, vc3, vc2);
vp = _mm512_mul_ps(vp, vt);
vp = _mm512_fmadd_ps(vt, vp, vt);
vp = _mm512_fmadd_ps(vl, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 9,553 | 41.088106 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-div-x112.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
__m512 vn4 = _mm512_mul_ps(vz4, vlog2e);
__m512 vn5 = _mm512_mul_ps(vz5, vlog2e);
__m512 vn6 = _mm512_mul_ps(vz6, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
vn4 = _mm512_roundscale_ps(vn4, 0);
vn5 = _mm512_roundscale_ps(vn5, 0);
vn6 = _mm512_roundscale_ps(vn6, 0);
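    // imm8 = 0 rounds n = z*log2(e) to the nearest integer; n later supplies the 2^n
    // factor via scalef.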
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
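    // Evaluate the degree-5 polynomial approximation of e^t by Horner's rule,
    // ending with the +1 term.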
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
vp4 = _mm512_fmadd_ps(vp4, vt4, vone);
vp5 = _mm512_fmadd_ps(vp5, vt5, vone);
vp6 = _mm512_fmadd_ps(vp6, vt6, vone);
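    // e := p * 2^n ~= e^z; VSCALEFPS applies the power of two directly, without building
    // the exponent bit pattern by hand.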
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
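    // sigmoid(-|x|) = e / (e + 1), computed with an exact division in this *-div variant.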
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
__m512 vf6 = _mm512_div_ps(ve6, vd6);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 9,060 | 40.186364 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-div-x128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
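  // Main loop: 128 floats per iteration in eight ZMM registers; shorter remainders fall
  // through to the loops below.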
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
const __m512 vz7 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx7), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
__m512 vn4 = _mm512_mul_ps(vz4, vlog2e);
__m512 vn5 = _mm512_mul_ps(vz5, vlog2e);
__m512 vn6 = _mm512_mul_ps(vz6, vlog2e);
__m512 vn7 = _mm512_mul_ps(vz7, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
vn4 = _mm512_roundscale_ps(vn4, 0);
vn5 = _mm512_roundscale_ps(vn5, 0);
vn6 = _mm512_roundscale_ps(vn6, 0);
vn7 = _mm512_roundscale_ps(vn7, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vz7);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
vp4 = _mm512_fmadd_ps(vp4, vt4, vone);
vp5 = _mm512_fmadd_ps(vp5, vt5, vone);
vp6 = _mm512_fmadd_ps(vp6, vt6, vone);
vp7 = _mm512_fmadd_ps(vp7, vt7, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 ve7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
const __m512 vd7 = _mm512_add_ps(ve7, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
__m512 vf6 = _mm512_div_ps(ve6, vd6);
__m512 vf7 = _mm512_div_ps(ve7, vd7);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
vf7 = _mm512_mask_sub_ps(vf7, _mm512_testn_epi32_mask(_mm512_castps_si512(vx7), vsign_mask), vone, vf7);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 9,863 | 40.974468 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
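// Illustrative usage sketch (not part of the generated kernel). The params-initialization
// helper name below is an assumption based on XNNPACK's naming pattern; in practice this
// ukernel is selected and called through higher-level operator code.
//
//   union xnn_f32_sigmoid_params params;
//   xnn_init_f32_sigmoid_avx512_rr1_p5_params(&params);  // assumed helper name
//   float x[128], y[128];
//   // batch is in bytes and must be a non-zero multiple of sizeof(float)
//   xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x16(128 * sizeof(float), x, y, &params);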
| 3,336 | 33.402062 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-div-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
output += 32;
}
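  // Any remaining full 16-float vectors are handled one register at a time before the
  // masked tail.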
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 5,050 | 33.834483 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-div-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 5,851 | 35.575 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-div-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 6,652 | 37.017143 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
__m512 vn4 = _mm512_mul_ps(vz4, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
vn4 = _mm512_roundscale_ps(vn4, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
vp4 = _mm512_fmadd_ps(vp4, vt4, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 7,453 | 38.231579 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-div-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
__m512 vn4 = _mm512_mul_ps(vz4, vlog2e);
__m512 vn5 = _mm512_mul_ps(vz5, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
vn4 = _mm512_roundscale_ps(vn4, 0);
vn5 = _mm512_roundscale_ps(vn5, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
vp4 = _mm512_fmadd_ps(vp4, vt4, vone);
vp5 = _mm512_fmadd_ps(vp5, vt5, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
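    // vf holds sigmoid(-|x|); for lanes whose sign bit is clear (x >= 0), reflect it to 1 - vf.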
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 8,254 | 39.268293 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
__m512 vn4 = _mm512_mul_ps(vz4, vlog2e);
__m512 vn5 = _mm512_mul_ps(vz5, vlog2e);
__m512 vn6 = _mm512_mul_ps(vz6, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
vn4 = _mm512_roundscale_ps(vn4, 0);
vn5 = _mm512_roundscale_ps(vn5, 0);
vn6 = _mm512_roundscale_ps(vn6, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
vp4 = _mm512_fmadd_ps(vp4, vt4, vone);
vp5 = _mm512_fmadd_ps(vp5, vt5, vone);
vp6 = _mm512_fmadd_ps(vp6, vt6, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
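    // Approximate 1/vd with RCP14 (relative error below 2**-14), then refine it with one
    // Newton-Raphson step, r <- r + r * (1 - r * d), which roughly squares the relative error.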
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
__m512 vr6 = _mm512_rcp14_ps(vd6);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
__m512 vf6 = _mm512_mul_ps(ve6, vr6);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 10,044 | 40.337449 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
const __m512 vz7 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx7), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
__m512 vn4 = _mm512_mul_ps(vz4, vlog2e);
__m512 vn5 = _mm512_mul_ps(vz5, vlog2e);
__m512 vn6 = _mm512_mul_ps(vz6, vlog2e);
__m512 vn7 = _mm512_mul_ps(vz7, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
vn4 = _mm512_roundscale_ps(vn4, 0);
vn5 = _mm512_roundscale_ps(vn5, 0);
vn6 = _mm512_roundscale_ps(vn6, 0);
vn7 = _mm512_roundscale_ps(vn7, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vz7);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
vp4 = _mm512_fmadd_ps(vp4, vt4, vone);
vp5 = _mm512_fmadd_ps(vp5, vt5, vone);
vp6 = _mm512_fmadd_ps(vp6, vt6, vone);
vp7 = _mm512_fmadd_ps(vp7, vt7, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 ve7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
const __m512 vd7 = _mm512_add_ps(ve7, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
__m512 vr6 = _mm512_rcp14_ps(vd6);
__m512 vr7 = _mm512_rcp14_ps(vd7);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
__m512 vf6 = _mm512_mul_ps(ve6, vr6);
__m512 vf7 = _mm512_mul_ps(ve7, vr7);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
vf7 = _mm512_mask_sub_ps(vf7, _mm512_testn_epi32_mask(_mm512_castps_si512(vx7), vsign_mask), vone, vf7);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 10,957 | 41.146154 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
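  // Same evaluation scheme as the scalef-div kernels above, except that the final e / (1 + e) is
  // computed as e * r, where r ~ 1 / (1 + e) comes from _mm512_rcp14_ps refined by one
  // Newton-Raphson step, instead of a full-precision division.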
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 3,547 | 33.446602 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 5,484 | 33.71519 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 6,395 | 35.548571 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 7,306 | 37.057292 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
__m512 vn4 = _mm512_mul_ps(vz4, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
vn4 = _mm512_roundscale_ps(vn4, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
vp4 = _mm512_fmadd_ps(vp4, vt4, vone);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 8,217 | 38.320574 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x96.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
__m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
__m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
__m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
__m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
__m512 vn4 = _mm512_mul_ps(vz4, vlog2e);
__m512 vn5 = _mm512_mul_ps(vz5, vlog2e);
vn0 = _mm512_roundscale_ps(vn0, 0);
vn1 = _mm512_roundscale_ps(vn1, 0);
vn2 = _mm512_roundscale_ps(vn2, 0);
vn3 = _mm512_roundscale_ps(vn3, 0);
vn4 = _mm512_roundscale_ps(vn4, 0);
vn5 = _mm512_roundscale_ps(vn5, 0);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
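    // Evaluate the degree-5 polynomial approximation of exp(t) on the reduced range using Horner's scheme.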
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
vp4 = _mm512_fmadd_ps(vp4, vt4, vone);
vp5 = _mm512_fmadd_ps(vp5, vt5, vone);
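    // Reconstruct exp(z) = p * 2**n with scalef.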
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
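    // For lanes with x >= 0 (sign bit clear), apply the identity sigmoid(x) = 1 - sigmoid(-x).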
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vone);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 9,128 | 39.393805 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x112.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
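    // Adding the magic bias rounds z*log2(e) to a multiple of 1/32 and exposes the 5-bit table index in the low mantissa bits.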
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
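    // permutex2var gathers 2**(i/32) from the 32-entry table held across vtable_lo and vtable_hi.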
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
const __m512 vl4 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn4), vtable_hi);
const __m512 vl5 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn5), vtable_hi);
const __m512 vl6 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn6), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
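    // Reduced argument t = z - n*ln(2), computed with a hi/lo split of ln(2) for extra precision.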
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vz6);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc2, vc1);
__m512 vp6 = _mm512_fmadd_ps(vt6, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vt4 = _mm512_mul_ps(vt4, vl4);
vt5 = _mm512_mul_ps(vt5, vl5);
vt6 = _mm512_mul_ps(vt6, vl6);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vl5);
vp6 = _mm512_fmadd_ps(vt6, vp6, vl6);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
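    // This "div" variant computes e / (e + 1) with a full-precision division rather than a reciprocal approximation.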
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
__m512 vf6 = _mm512_div_ps(ve6, vd6);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 9,971 | 43.717489 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
const __m512 vz7 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx7), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
const __m512 vl4 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn4), vtable_hi);
const __m512 vl5 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn5), vtable_hi);
const __m512 vl6 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn6), vtable_hi);
const __m512 vl7 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn7), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vz6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vz7);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc2, vc1);
__m512 vp6 = _mm512_fmadd_ps(vt6, vc2, vc1);
__m512 vp7 = _mm512_fmadd_ps(vt7, vc2, vc1);
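    // Fold the table value into the polynomial: p = l * (1 + c1*t + c2*t*t).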
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vt4 = _mm512_mul_ps(vt4, vl4);
vt5 = _mm512_mul_ps(vt5, vl5);
vt6 = _mm512_mul_ps(vt6, vl6);
vt7 = _mm512_mul_ps(vt7, vl7);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vl5);
vp6 = _mm512_fmadd_ps(vt6, vp6, vl6);
vp7 = _mm512_fmadd_ps(vt7, vp7, vl7);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 ve7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
const __m512 vd7 = _mm512_add_ps(ve7, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
__m512 vf6 = _mm512_div_ps(ve6, vd6);
__m512 vf7 = _mm512_div_ps(ve7, vd7);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
vf7 = _mm512_mask_sub_ps(vf7, _mm512_testn_epi32_mask(_mm512_castps_si512(vx7), vsign_mask), vone, vf7);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
}
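  // Process any remaining full 16-float vectors one at a time.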
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 10,850 | 44.592437 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
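  // sigmoid(x) is evaluated as f = exp(-|x|) / (1 + exp(-|x|)) and mirrored to 1 - f for non-negative inputs.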
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 3,715 | 36.16 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
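  // Main loop: two 16-float vectors (32 elements) per iteration.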
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 5,581 | 36.716216 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
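    // scalef scales by 2**floor(n); the fractional 2**(i/32) factor was already supplied by the table lookup.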
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 6,458 | 38.625767 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
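    // testn sets mask bits for lanes whose sign bit is clear (x >= 0); those lanes are flipped to 1 - f.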
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 7,335 | 40.213483 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
const __m512 vl4 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn4), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vz4);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vt4 = _mm512_mul_ps(vt4, vl4);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vl4);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
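    // Write back only the valid lanes of the final partial vector.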
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 8,212 | 41.554404 | 108 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
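    // z := x | sign_mask = -|x|: evaluate on the non-positive half-line so exp(z) <= 1; non-negative x is reconstructed after the division.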
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
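    // Magic-bias trick: n := z*log2(e) rounded to the nearest 1/32; the low 5 bits of the biased value's bit pattern double as the lookup index.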
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
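    // Fetch l = 2**(j/32) from the 32-entry table split across vtable_lo/vtable_hi (index bit 4 picks the register, bits 0-3 the lane).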
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
const __m512 vl4 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn4), vtable_hi);
const __m512 vl5 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn5), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vz5);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vt4 = _mm512_mul_ps(vt4, vl4);
vt5 = _mm512_mul_ps(vt5, vl5);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vl5);
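    // VSCALEFPS reattaches the integer exponent: e := p * 2**floor(n), which approximates exp(z).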
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
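    // "div" variant: sigmoid on the non-positive half-line is e / (e + 1), computed with a full-precision division.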
__m512 vf0 = _mm512_div_ps(ve0, vd0);
__m512 vf1 = _mm512_div_ps(ve1, vd1);
__m512 vf2 = _mm512_div_ps(ve2, vd2);
__m512 vf3 = _mm512_div_ps(ve3, vd3);
__m512 vf4 = _mm512_div_ps(ve4, vd4);
__m512 vf5 = _mm512_div_ps(ve5, vd5);
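    // Lanes whose sign bit is clear (x >= 0) are flipped: sigmoid(x) = 1 - sigmoid(-|x|).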
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vf = _mm512_div_ps(ve, vd);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 9,089 | 42.701923 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
const __m512 vl4 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn4), vtable_hi);
const __m512 vl5 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn5), vtable_hi);
const __m512 vl6 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn6), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vz6);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
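    // Degree-2 polynomial scaled by the table value: p := l + (t*l)*(c1 + c2*t), approximating l * exp(t).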
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc2, vc1);
__m512 vp6 = _mm512_fmadd_ps(vt6, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vt4 = _mm512_mul_ps(vt4, vl4);
vt5 = _mm512_mul_ps(vt5, vl5);
vt6 = _mm512_mul_ps(vt6, vl6);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vl5);
vp6 = _mm512_fmadd_ps(vt6, vp6, vl6);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
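    // RCP14 gives ~14 correct bits of 1/d; one Newton-Raphson step r := r + r*(1 - r*d) refines it before the multiply.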
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
__m512 vr6 = _mm512_rcp14_ps(vd6);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
__m512 vf6 = _mm512_mul_ps(ve6, vr6);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 10,955 | 43.536585 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
const __m512 vz6 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx6), vsign_mask));
const __m512 vz7 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx7), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
const __m512 vl4 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn4), vtable_hi);
const __m512 vl5 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn5), vtable_hi);
const __m512 vl6 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn6), vtable_hi);
const __m512 vl7 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn7), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
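    // Two-step ("rr2") range reduction: t := z - n*ln2, with ln2 split into hi and lo parts to keep the reduction accurate.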
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vz6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vz7);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc2, vc1);
__m512 vp6 = _mm512_fmadd_ps(vt6, vc2, vc1);
__m512 vp7 = _mm512_fmadd_ps(vt7, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vt4 = _mm512_mul_ps(vt4, vl4);
vt5 = _mm512_mul_ps(vt5, vl5);
vt6 = _mm512_mul_ps(vt6, vl6);
vt7 = _mm512_mul_ps(vt7, vl7);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vl5);
vp6 = _mm512_fmadd_ps(vt6, vp6, vl6);
vp7 = _mm512_fmadd_ps(vt7, vp7, vl7);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 ve6 = _mm512_scalef_ps(vp6, vn6);
const __m512 ve7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
const __m512 vd6 = _mm512_add_ps(ve6, vone);
const __m512 vd7 = _mm512_add_ps(ve7, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
__m512 vr6 = _mm512_rcp14_ps(vd6);
__m512 vr7 = _mm512_rcp14_ps(vd7);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
vr6 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr6, vd6, vone), vr6, vr6);
vr7 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr7, vd7, vone), vr7, vr7);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
__m512 vf6 = _mm512_mul_ps(ve6, vr6);
__m512 vf7 = _mm512_mul_ps(ve7, vr7);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
vf6 = _mm512_mask_sub_ps(vf6, _mm512_testn_epi32_mask(_mm512_castps_si512(vx6), vsign_mask), vone, vf6);
vf7 = _mm512_mask_sub_ps(vf7, _mm512_testn_epi32_mask(_mm512_castps_si512(vx7), vsign_mask), vone, vf7);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 11,944 | 44.418251 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
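  // One 16-float vector per iteration; a trailing remainder of 1-15 elements is handled below with masked load/store.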
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 3,926 | 36.04717 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 6,015 | 36.36646 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 7,002 | 38.342697 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 7,989 | 39.974359 | 108 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
const __m512 vl4 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn4), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vz4);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vt4 = _mm512_mul_ps(vt4, vl4);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vl4);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
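    // Approximate 1 / (e + 1): an RCP14 estimate refined by one Newton-Raphson
    // step, r <- r + r * (1 - r * d).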
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
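    // For lanes whose sign bit is clear (x >= 0), use sigmoid(x) = 1 - sigmoid(-x).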
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
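// Illustrative scalar sketch (not part of the generated kernels): the same
// evaluation strategy the SIMD code above implements, with libm expf() standing
// in for the table-plus-polynomial exp approximation.
#include <math.h>

static float sigmoid_scalar_reference(float x) {
  const float z = -fabsf(x);         // evaluate on the non-positive half-line
  const float e = expf(z);           // the kernels approximate this step
  const float f = e / (e + 1.0f);    // sigmoid(-|x|)
  return signbit(x) ? f : 1.0f - f;  // mirror the result when the sign bit is clear
}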
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-nr1fma-x96.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);
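  // Same rr2-lut32-p2 + scalef evaluation as the x80 kernel above, unrolled to
  // six ZMM vectors (96 floats) per main-loop iteration.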
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));
const __m512 vz5 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx5), vsign_mask));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m512 vl0 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn0), vtable_hi);
const __m512 vl1 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn1), vtable_hi);
const __m512 vl2 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn2), vtable_hi);
const __m512 vl3 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn3), vtable_hi);
const __m512 vl4 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn4), vtable_hi);
const __m512 vl5 = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn5), vtable_hi);
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vz5);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
__m512 vp0 = _mm512_fmadd_ps(vt0, vc2, vc1);
__m512 vp1 = _mm512_fmadd_ps(vt1, vc2, vc1);
__m512 vp2 = _mm512_fmadd_ps(vt2, vc2, vc1);
__m512 vp3 = _mm512_fmadd_ps(vt3, vc2, vc1);
__m512 vp4 = _mm512_fmadd_ps(vt4, vc2, vc1);
__m512 vp5 = _mm512_fmadd_ps(vt5, vc2, vc1);
vt0 = _mm512_mul_ps(vt0, vl0);
vt1 = _mm512_mul_ps(vt1, vl1);
vt2 = _mm512_mul_ps(vt2, vl2);
vt3 = _mm512_mul_ps(vt3, vl3);
vt4 = _mm512_mul_ps(vt4, vl4);
vt5 = _mm512_mul_ps(vt5, vl5);
vp0 = _mm512_fmadd_ps(vt0, vp0, vl0);
vp1 = _mm512_fmadd_ps(vt1, vp1, vl1);
vp2 = _mm512_fmadd_ps(vt2, vp2, vl2);
vp3 = _mm512_fmadd_ps(vt3, vp3, vl3);
vp4 = _mm512_fmadd_ps(vt4, vp4, vl4);
vp5 = _mm512_fmadd_ps(vt5, vp5, vl5);
const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);
const __m512 ve5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vd0 = _mm512_add_ps(ve0, vone);
const __m512 vd1 = _mm512_add_ps(ve1, vone);
const __m512 vd2 = _mm512_add_ps(ve2, vone);
const __m512 vd3 = _mm512_add_ps(ve3, vone);
const __m512 vd4 = _mm512_add_ps(ve4, vone);
const __m512 vd5 = _mm512_add_ps(ve5, vone);
__m512 vr0 = _mm512_rcp14_ps(vd0);
__m512 vr1 = _mm512_rcp14_ps(vd1);
__m512 vr2 = _mm512_rcp14_ps(vd2);
__m512 vr3 = _mm512_rcp14_ps(vd3);
__m512 vr4 = _mm512_rcp14_ps(vd4);
__m512 vr5 = _mm512_rcp14_ps(vd5);
vr0 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
vr2 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr2, vd2, vone), vr2, vr2);
vr3 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr3, vd3, vone), vr3, vr3);
vr4 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr4, vd4, vone), vr4, vr4);
vr5 = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr5, vd5, vone), vr5, vr5);
__m512 vf0 = _mm512_mul_ps(ve0, vr0);
__m512 vf1 = _mm512_mul_ps(ve1, vr1);
__m512 vf2 = _mm512_mul_ps(ve2, vr2);
__m512 vf3 = _mm512_mul_ps(ve3, vr3);
__m512 vf4 = _mm512_mul_ps(ve4, vr4);
__m512 vf5 = _mm512_mul_ps(ve5, vr5);
vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);
vf5 = _mm512_mask_sub_ps(vf5, _mm512_testn_epi32_mask(_mm512_castps_si512(vx5), vsign_mask), vone, vf5);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
output += 96;
}
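  // Drain the remaining full 16-float vectors before the masked tail below.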
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
__m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
vt = _mm512_mul_ps(vt, vl);
vp = _mm512_fmadd_ps(vt, vp, vl);
const __m512 ve = _mm512_scalef_ps(vp, vn);
const __m512 vd = _mm512_add_ps(ve, vone);
__m512 vr = _mm512_rcp14_ps(vd);
vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);
__m512 vf = _mm512_mul_ps(ve, vr);
vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut2048-p1-nr2recps-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_lo);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.denorm_cutoff);
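  // Compute sigmoid(x) as y / (y + 1) with y ~= exp(-|x|). The exponential is
  // rebuilt from the 2048-entry xnn_table_exp2minus_k_over_2048 table: the low
  // 11 bits of the magic-biased vn select the table entry, the bits above them
  // (shifted left by 12) are added to the table value's bit pattern to apply the
  // power-of-two scale, and a degree-1 polynomial in the two-step (ln2_hi/ln2_lo)
  // reduced argument supplies the correction term. The reciprocal of the
  // denominator uses VRECPE followed by two VRECPS Newton-Raphson refinements.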
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vy0123 = vmlaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
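    // Zero the lanes where |x| exceeds vdenorm_cutoff, then keep f for negative
    // inputs and take 1 - f for the rest.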
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
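  // Tail of 1-3 elements: a full vector is read (out-of-bounds reads are allowed
  // by XNN_OOB_READS) and only the valid lanes are written out below.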
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut2048-p1-nr2recps-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_lo);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.denorm_cutoff);
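  // Same lut2048-p1 evaluation as the x12 kernel above, unrolled to four vectors
  // (16 floats) per main-loop iteration.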
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vy0123 = vmlaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut2048-p1-nr2recps-x20.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_lo);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.denorm_cutoff);
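  // Main loop of the lut2048-p1 evaluation, unrolled to five vectors (20 floats)
  // per iteration.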
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vln2_lo);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vy0123 = vmlaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vmlaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut2048-p1-nr2recps-x24.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_lo);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.denorm_cutoff);
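  // Widest unroll of this family: six vectors (24 floats) per main-loop iteration.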
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vmlaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vln2_hi);
float32x4_t vtKLMN = vmlaq_f32(vzKLMN, vnKLMN, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vln2_lo);
vtKLMN = vmlaq_f32(vtKLMN, vnKLMN, vln2_lo);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc1);
const float32x4_t vy0123 = vmlaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vmlaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vmlaq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
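    // Compute the reciprocal of d := y + 1 with vrecpe and two Newton-Raphson refinement steps (vrecps).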
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);
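    // For |x| above the denormal cutoff, exp(-|x|) underflows: force those results to zero.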
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
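    // vf holds sigmoid(-|x|): keep it where x < 0, otherwise use 1 - vf.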
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
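  // Tail: up to 3 remaining elements are handled with a full 4-lane load; only the valid lanes are stored below.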
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut2048-p1-nr2recps-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_lo);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
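    // The low 11 bits of each lane of n index the 2048-entry table of 2**(-k/2048) values.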
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
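    // Combine the table value with the shifted exponent bits to reconstruct s ≈ exp(-z).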
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
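    // y := s * (1 + c1*t) ≈ exp(-z).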
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
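    // Store two lanes and/or one lane depending on how many elements remain.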
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut2048-p1-nr2recps-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut2048_p1_nr2recps_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.ln2_lo);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut2048_p1.denorm_cutoff);
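  // Main loop: process 8 elements (two NEON vectors) per iteration.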
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vy0123 = vmlaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vmlaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut64-p2-nr2recps-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
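    // Shift the integer part of n into the float exponent field (23 mantissa bits - 6 index bits = 17).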
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(-(n % 64) / 64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
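    // Reconstruct the residual t := z + n*ln2, using a hi/lo split of ln2 for extra precision.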
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
vp0123 = vmlsq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlsq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlsq_f32(vt89AB, vp89AB, vt89AB);
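    // y := s * (1 - t + c2*t*t) ≈ s * exp(-t) = exp(-z).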
const float32x4_t vy0123 = vmlsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vmlsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut64-p2-nr2recps-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(-(n % 64) / 64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vmlsq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlsq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlsq_f32(vtCDEF, vpCDEF, vtCDEF);
const float32x4_t vy0123 = vmlsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vmlsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vmlsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut64-p2-nr2recps-x20.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(-(n % 64) / 64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vln2_lo);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
vp0123 = vmlsq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlsq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vmlsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vy0123 = vmlsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vmlsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vmlsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vmlsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 14,728 | 49.61512 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut64-p2-nr2recps-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);
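  // sigmoid(x) is evaluated on z := |x| as f := e / (1 + e), where e := exp(-z) is approximated
  // as 2**n * exp(-t) using the 64-entry table of 2**(-k/64) values and a degree-2 polynomial in t;
  // the result is then reconstructed as sigmoid(x) = f for x < 0 and 1 - f for x >= 0.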
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vmlaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 17);
    // Use bits 0:6 of n, as integer, as an index into the 64-entry table of 2**(-k/64) values.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
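    // Combine the table value l with the pre-shifted exponent bits ve to reconstruct s ~= 2**n.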
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
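    // t := z + n * ln(2), computed with a split (hi/lo) ln(2) to reduce rounding error.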
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vln2_hi);
float32x4_t vtKLMN = vmlaq_f32(vzKLMN, vnKLMN, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vln2_lo);
vtKLMN = vmlaq_f32(vtKLMN, vnKLMN, vln2_lo);
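    // y := s * (1 - t + c2 * t**2) ~= s * exp(-t) = exp(-z).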
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc2);
vp0123 = vmlsq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlsq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vmlsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vmlsq_f32(vtKLMN, vpKLMN, vtKLMN);
const float32x4_t vy0123 = vmlsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vmlsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vmlsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vmlsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vmlsq_f32(vsKLMN, vsKLMN, vpKLMN);
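    // d := y + 1; r ~= 1 / d via VRECPE refined with two Newton-Raphson steps; f := y * r.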
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);
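    // For |x| beyond the cutoff, exp(-|x|) is denormal; flush f to zero, its limiting value.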
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
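    // Select f for negative x and 1 - f for non-negative x.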
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 16,621 | 51.106583 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut64-p2-nr2recps-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 5,066 | 39.214286 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-lut64-p2-nr2recps-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
    // Use bits 0:6 of n, as integer, as an index into the 64-entry table of 2**(-k/64) values.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
vp0123 = vmlsq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlsq_f32(vt4567, vp4567, vt4567);
const float32x4_t vy0123 = vmlsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vmlsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlsq_f32(vt, vp, vt);
const float32x4_t vy = vmlsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 9,046 | 42.705314 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-p5-nr2recps-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neon_rr2_p5_nr2recps_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_p5.minus_log2e);
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);
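  // Same evaluation scheme as the lut64 kernels above, except that exp(-z) for z := |x| is
  // approximated as 2**n * (1 + c1*t + ... + c5*t**5), a degree-5 polynomial, with no table lookup.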
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
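    // s := 2**n, reconstructed by shifting the integer bits of n into the floating-point exponent field.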
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
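    // e := s * (1 + c1*t + c2*t**2 + ... + c5*t**5) ~= s * exp(-t) = exp(-z), evaluated with Horner's scheme.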
float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t ve0123 = vmlaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vmlaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 7,936 | 38.685 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-p5-nr2recps-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neon_rr2_p5_nr2recps_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_p5.minus_log2e);
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t ve0123 = vmlaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vmlaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 9,237 | 40.612613 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-p5-nr2recps-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neon_rr2_p5_nr2recps_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_p5.minus_log2e);
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vmlaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
const float32x4_t ve0123 = vmlaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vmlaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vmlaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(veGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,538 | 42.192623 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-p5-nr2recps-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neon_rr2_p5_nr2recps_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_rr2_p5.minus_log2e);
const float32x4_t vln2_hi = vld1q_dup_f32(¶ms->neon_rr2_p5.ln2_hi);
const float32x4_t vln2_lo = vld1q_dup_f32(¶ms->neon_rr2_p5.ln2_lo);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_rr2_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_rr2_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neon_rr2_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_p5.denorm_cutoff);
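  // Main loop: 24 elements per iteration. Per element it effectively computes
  //   e = exp(-|x|);  f = e / (e + 1);  output = (x < 0) ? f : 1 - f;
  // exp(-|x|) is evaluated with a two-term (hi/lo) ln2 range reduction and a degree-5
  // polynomial, and the division uses a reciprocal estimate refined by two
  // Newton-Raphson steps (vrecps).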
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
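    // Range reduction: n = round(-|x| / ln2), obtained with the magic-bias trick so the
    // rounded integer sits in the low mantissa bits of vn.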
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vmlaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
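    // s ~= 2**n, formed by shifting vn's low-order bits into the float exponent field.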
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vln2_hi);
float32x4_t vtKLMN = vmlaq_f32(vzKLMN, vnKLMN, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vln2_lo);
vtKLMN = vmlaq_f32(vtKLMN, vnKLMN, vln2_lo);
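    // Degree-5 polynomial in the reduction residual t; combined with s below it
    // reconstructs e ~= exp(-|x|).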
float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vmlaq_f32(vc4, vc5, vtGHIJ);
float32x4_t vpKLMN = vmlaq_f32(vc4, vc5, vtKLMN);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);
vpKLMN = vmlaq_f32(vc3, vpKLMN, vtKLMN);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);
vpKLMN = vmlaq_f32(vc2, vpKLMN, vtKLMN);
vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc1, vpGHIJ, vtGHIJ);
vpKLMN = vmlaq_f32(vc1, vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
const float32x4_t ve0123 = vmlaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vmlaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vmlaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t veKLMN = vmlaq_f32(vsKLMN, vpKLMN, vtKLMN);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(veKLMN, vone);
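    // Approximate 1/d with vrecpe and refine it with two Newton-Raphson steps (vrecps),
    // then f = e * (1/d) = sigmoid(-|x|).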
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
float32x4_t vf89AB = vmulq_f32(ve89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(veCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(veGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(veKLMN, vrKLMN);
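    // Where |x| is above the cutoff, exp(-|x|) is denormal: force those lanes of f to
    // zero, so the final select yields 0 for large negative x and 1 for large positive x.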
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
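    // Reconstruct the sign: sigmoid(x) = 1 - sigmoid(-x), so keep f for x < 0 and use
    // 1 - f for x >= 0.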
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
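  // Remainder of 1-3 elements: a full vector is loaded (the kernel is declared with
  // XNN_OOB_READS, so reading past the end is permitted) and only the valid lanes are
  // stored below.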
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 11,839 | 43.511278 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-p5-nr2recps-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neon_rr2_p5_nr2recps_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_rr2_p5.minus_log2e);
const float32x4_t vln2_hi = vld1q_dup_f32(¶ms->neon_rr2_p5.ln2_hi);
const float32x4_t vln2_lo = vld1q_dup_f32(¶ms->neon_rr2_p5.ln2_lo);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_rr2_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_rr2_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neon_rr2_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_p5.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 3,938 | 34.486486 | 101 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neon-rr2-p5-nr2recps-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__neon_rr2_p5_nr2recps_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neon_rr2_p5.minus_log2e);
const float32x4_t vln2_hi = vld1q_dup_f32(¶ms->neon_rr2_p5.ln2_hi);
const float32x4_t vln2_lo = vld1q_dup_f32(¶ms->neon_rr2_p5.ln2_lo);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_rr2_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_rr2_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neon_rr2_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t ve0123 = vmlaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vmlaq_f32(vs4567, vp4567, vt4567);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(ve0123, vr0123);
float32x4_t vf4567 = vmulq_f32(ve4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
vt = vmlaq_f32(vt, vn, vln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vmlaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(ve, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 6,632 | 36.264045 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr1recps1fma-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr1recps1fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.ln2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.denorm_cutoff);
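  // Main loop: 12 elements per iteration. Each element is again computed as e / (e + 1)
  // with e = exp(-|x|) and then reconstructed by sign, but here 2**n comes from a
  // 2048-entry table (fractional part) plus an integer exponent adjustment, so a
  // degree-1 polynomial on the residual suffices; the reciprocal is refined with one
  // vrecps step and one FMA-based Newton-Raphson step.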
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
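    // n carries 2048 times the exponent: its low 11 bits select an entry of the
    // 2**(-k/2048) table, and its remaining bits, shifted left by 12 into the exponent
    // field, supply the integer power of two.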
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
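    // Combine: add the exponent adjustment to the table value's bit pattern to form s.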
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
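    // 1/d via a reciprocal estimate refined by one vrecps step and one FMA-based
    // Newton-Raphson step.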
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,472 | 46.38914 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr1recps1fma-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr1recps1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.ln2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 12,291 | 48.765182 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr1recps1fma-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr1recps1fma_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.ln2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 14,110 | 50.688645 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr1recps1fma-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr1recps1fma_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.ln2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vfmaq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
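    // Reciprocal of d = 1 + exp(-|x|): start from a VRECPE estimate, then do
    // one VRECPS Newton-Raphson step and one FMA-based step, matching the
    // "nr1recps1fma" part of the kernel name.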
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vrKLMN = vmulq_f32(vrKLMN, vrecpsq_f32(vrKLMN, vdKLMN));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 15,929 | 52.277592 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr1recps1fma-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr1recps1fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
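    // Magic-bias trick: vn carries -z/ln(2), scaled and rounded so that the
    // low 11 bits of its bit pattern index the 2048-entry 2**(-k/2048) table,
    // while the remaining bits, shifted into the exponent field (<< 12),
    // supply the integer power of two; vs below thus reconstructs exp(-z).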
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 4,921 | 39.677686 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr1recps1fma-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr1recps1fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
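  // Remainder of 1 to 3 elements: a full vector is loaded (hence the
  // XNN_OOB_READS annotation on the function), but only the remaining
  // 1-3 floats are stored below.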
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,650 | 43.364103 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2fma-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2fma_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
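    // Refine the VRECPE estimate of 1/d with two FMA-based Newton-Raphson
    // steps, r <- r + r * (1 - r * d), per the "nr2fma" kernel name.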
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,518 | 46.597285 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2fma-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
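    // For |x| beyond the denormal cutoff, exp(-|x|) is too small to represent
    // reliably, so f is forced to 0; the blend below then keeps f for x < 0
    // and uses 1 - f for x >= 0, giving exactly 0 and 1 at the extremes.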
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 12,349 | 49 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2fma-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2fma_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
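    // vt is the reduced argument; the degree-1 polynomial below gives
    // vy = vs * (1 + c1 * vt) ~= exp(-z), the numerator of the sigmoid.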
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
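    // Store the remaining 1-3 results: two lanes first if needed, then a single lane.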
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2fma-x24.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2fma_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
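  // Main loop: 24 floats per iteration. Each lane computes sigmoid(x) as
  // exp(-|x|) / (1 + exp(-|x|)): exp(-|x|) comes from a 2048-entry table of 2**(-k/2048)
  // plus a degree-1 polynomial correction, and the division is replaced by a VRECPE
  // estimate refined with two Newton-Raphson steps.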
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
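    // Magic-bias rounding: the low mantissa bits of each vn lane now hold the scaled,
    // rounded exponent -|x| * 2048 / ln(2). Shifting the bit pattern left by 12 moves the
    // whole-power-of-two part into the float exponent field; the low 11 bits index the table.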
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
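    // Reconstruct vs ~= 2**(n/2048) by adding the exponent bits to the bit pattern of the
    // table value (an integer addition on the float representation).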
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
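    // vt is the small residual left by the range reduction; the degree-1 polynomial below
    // corrects the table value with it, giving vy ~= exp(-|x|).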
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vfmaq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
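    // vd = 1 + exp(-|x|). Approximate 1/vd with VRECPE, then refine the estimate with two
    // Newton-Raphson steps expressed as fused multiply-add/subtract.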
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
float32x4_t vrKLMN = vrecpeq_f32(vdKLMN);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr89AB = vfmaq_f32(vr89AB, vr89AB, vfmsq_f32(vone, vr89AB, vd89AB));
vrCDEF = vfmaq_f32(vrCDEF, vrCDEF, vfmsq_f32(vone, vrCDEF, vdCDEF));
vrGHIJ = vfmaq_f32(vrGHIJ, vrGHIJ, vfmsq_f32(vone, vrGHIJ, vdGHIJ));
vrKLMN = vfmaq_f32(vrKLMN, vrKLMN, vfmsq_f32(vone, vrKLMN, vdKLMN));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
float32x4_t vfKLMN = vmulq_f32(vyKLMN, vrKLMN);
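    // Past the denormal cutoff exp(-|x|) underflows, so force those lanes of the result to 0.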
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
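    // vf holds sigmoid(-|x|): keep it for negative inputs, otherwise use the reflection 1 - vf.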
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
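  // Tail loop: same computation, one 4-float vector at a time.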
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
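  // Remainder of 1-3 elements: compute a full vector (out-of-bounds reads are permitted,
  // see XNN_OOB_READS) and store only the valid lanes.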
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2fma-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2fma_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
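  // Minimal x4 variant: the same lut2048 + degree-1 polynomial evaluation, one 4-float
  // vector per loop iteration.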
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2fma-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2fma_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
vr0123 = vfmaq_f32(vr0123, vr0123, vfmsq_f32(vone, vr0123, vd0123));
vr4567 = vfmaq_f32(vr4567, vr4567, vfmsq_f32(vone, vr4567, vd4567));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2recps-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2recps_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
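    // nr2recps variant: refine the VRECPE estimates with two VRECPS-based Newton-Raphson
    // steps instead of the FMA formulation used in the nr2fma kernels.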
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2recps-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2recps_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-neonfma-rr1-lut2048-p1-nr2recps-x20.c
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut2048_p1_nr2recps_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
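    // Gather the table entries 2**(-k/2048), two 32-bit lanes per 64-bit index pair.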
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
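    // s := 2**n, reconstructed by adding the exponent adjustment ve to the bit pattern of the table entry.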
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
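    // Subtract the magic bias to recover n as a float, then compute the small residual t = z + n*ln2.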
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
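    // Degree-1 polynomial: y := s + s*(c1*t) ~= s*exp(-t) = exp(-z), since c1 is close to -1.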
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
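    // d := 1 + exp(-z); approximate 1/d with VRECPE and refine it with two Newton-Raphson steps (VRECPS).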
float32x4_t vr0123 = vrecpeq_f32(vd0123);
float32x4_t vr4567 = vrecpeq_f32(vd4567);
float32x4_t vr89AB = vrecpeq_f32(vd89AB);
float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);
float32x4_t vrGHIJ = vrecpeq_f32(vdGHIJ);
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));
vrGHIJ = vmulq_f32(vrGHIJ, vrecpsq_f32(vrGHIJ, vdGHIJ));
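    // f := exp(-z) / (1 + exp(-z)) = sigmoid(-|x|).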
float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);
float32x4_t vfGHIJ = vmulq_f32(vyGHIJ, vrGHIJ);
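    // Zero the result where |x| exceeds the cutoff and exp(-|x|) would underflow.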
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
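    // For x >= 0 select 1 - f, using the identity sigmoid(x) = 1 - sigmoid(-x).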
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
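  // Tail loop: process 4 floats at a time with the same algorithm.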
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
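  // Remainder of 1-3 floats: load a full vector (the kernel is declared XNN_OOB_READS) and store only the valid lanes.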
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vr = vrecpeq_f32(vd);
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
float32x4_t vf = vmulq_f32(vy, vr);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 14,030 | 50.395604 | 113 |
c
|