repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-sse41-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/sse41.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vrndz_ukernel__sse41_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vy0123 = _mm_round_ps(vx0123, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
const __m128 vy4567 = _mm_round_ps(vx4567, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vy = _mm_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vy = _mm_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 1,787 | 26.9375 | 87 | c |
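The ukernel above takes its batch size in bytes rather than elements and handles the sub-vector tail itself, so one call covers an arbitrary-length contiguous buffer. Below is a minimal usage sketch, offered as an illustration only: it assumes the XNNPACK headers and an SSE4.1 build are available, and it relies on the fact that this particular kernel never dereferences `params`, which is passed only to satisfy the signature.

```c
#include <stdio.h>
#include <string.h>

#include <xnnpack/vunary.h>  // declares the ukernel and union xnn_f32_rnd_params (assumed include path)

int main(void) {
  // 12 floats are allocated but only the first 11 are processed; the spare element keeps the
  // kernel's documented over-read (XNN_OOB_READS) inside the buffer.
  float input[12] = {1.9f, -1.9f, 2.5f, -2.5f, 0.7f, -0.7f, 3.1f, -3.1f, 8.9f, -8.9f, 0.5f, 0.0f};
  float output[12];

  union xnn_f32_rnd_params params;
  memset(&params, 0, sizeof(params));  // this kernel never reads params; zero-init only for hygiene

  // The batch argument is a byte count and must be a non-zero multiple of sizeof(float).
  xnn_f32_vrndz_ukernel__sse41_x8(11 * sizeof(float), input, output, &params);

  for (int i = 0; i < 11; i++) {
    printf("rndz(%+.1f) = %+.1f\n", input[i], output[i]);
  }
  return 0;
}
```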
XNNPACK | XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-wasmsimd-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vrndz_ukernel__wasmsimd_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input); input += 4;
const v128_t vy0123 = wasm_f32x4_trunc(vx0123);
wasm_v128_store(output, vy0123); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_f32x4_trunc(vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,312 | 24.745098 | 86 | c |
XNNPACK | XNNPACK-master/src/f32-vrnd/gen/f32-vrndz-wasmsimd-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vrnd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vrndz_ukernel__wasmsimd_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input); input += 4;
const v128_t vx4567 = wasm_v128_load(input); input += 4;
const v128_t vy0123 = wasm_f32x4_trunc(vx0123);
const v128_t vy4567 = wasm_f32x4_trunc(vx4567);
wasm_v128_store(output, vy0123); output += 4;
wasm_v128_store(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input); input += 4;
const v128_t vy = wasm_f32x4_trunc(vx);
wasm_v128_store(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vy = wasm_f32x4_trunc(vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,693 | 27.711864 | 86 | c |
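Both WAsm SIMD variants share the same tail: the last one to three floats are written with a 64-bit lane store, a shuffle that exposes the high half, and a 32-bit lane store, selected by testing individual bits of the remaining byte count. A scalar transcription of that tail, offered only as an illustration (truncf stands in for wasm_f32x4_trunc):

```c
#include <math.h>
#include <stddef.h>
#include <stdio.h>

// Scalar sketch of the masked tail above. 'batch' is the remaining byte count (1..3 floats).
// Like the SIMD kernel (see XNN_OOB_READS), it reads a full 4-float window, so the caller
// must keep at least 4 floats readable at 'input'.
static void rndz_tail_scalar(const float* input, float* output, size_t batch) {
  float v[4];
  for (int i = 0; i < 4; i++) {
    v[i] = truncf(input[i]);          // stands in for wasm_f32x4_trunc on the full vector load
  }
  const float* lanes = v;
  if (batch & (2 * sizeof(float))) {  // bit with value 8: two more floats to store
    output[0] = lanes[0];
    output[1] = lanes[1];
    output += 2;
    lanes += 2;                       // mirrors the v64x2 shuffle that exposes the high half
  }
  if (batch & (1 * sizeof(float))) {  // bit with value 4: one last float to store
    output[0] = lanes[0];
  }
}

int main(void) {
  const float in[4] = {1.7f, -2.9f, 3.5f, 0.0f};
  float out[3];
  rndz_tail_scalar(in, out, 3 * sizeof(float));
  printf("%g %g %g\n", out[0], out[1], out[2]);  // expected: 1 -2 3
  return 0;
}
```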
XNNPACK | XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 (2x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
input += 16;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
// Store 16 (2x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
| 8,490 | 38.86385 | 116 | c |
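Stripped of the 2x8 unrolling, every lane of this kernel runs the same short pipeline: magic-bias rounding, exponent construction by bit shift, Cody-Waite range reduction, a degree-5 polynomial, and a flush of would-be denormals. The scalar transcription below is a sketch only; it reuses the hex-float constants from the kernel but substitutes ordinary mul/add for FMA, so results may differ from the SIMD output in the last bit.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Scalar transcription of one lane of the AVX2 pipeline above.
static float scale_exp_minus_max(float i, float i_max, float scale) {
  const float magic_bias    = 0x1.8000FEp23f;
  const float denorm_cutoff = -0x1.5D589Ep6f;
  const float log2e         = 0x1.715476p+0f;
  const float minus_ln2_hi  = -0x1.62E43p-1f;
  const float minus_ln2_lo  = 0x1.05C61p-29f;
  const float c1 = 0x1.FFFFF6p-1f, c2 = 0x1.FFFDC6p-2f, c3 = 0x1.555A80p-3f;
  const float c4 = 0x1.573A1Ap-5f, c5 = 0x1.0F9F9Cp-7f;

  const float x = i - i_max;            // x <= 0 when i_max is the true maximum
  float n = x * log2e + magic_bias;     // magic-bias trick: low bits of n now hold round(x/ln2)

  uint32_t n_bits;
  memcpy(&n_bits, &n, sizeof(n_bits));
  uint32_t s_bits = n_bits << 23;       // move the rounded value into the exponent field
  float s;
  memcpy(&s, &s_bits, sizeof(s));       // s == 2**round(x/ln2) for non-underflowing inputs

  n -= magic_bias;                      // recover round(x/ln2) as an ordinary float
  float t = n * minus_ln2_hi + x;       // Cody-Waite: subtract n*ln2 in two pieces
  t = n * minus_ln2_lo + t;

  float p = c5 * t + c4;                // degree-5 polynomial, Horner form
  p = p * t + c3;
  p = p * t + c2;
  p = p * t + c1;

  t *= s;
  float f = t * p + s;                  // f ~= exp(x) = s * (1 + t*p)
  if (x < denorm_cutoff) {
    f = 0.0f;                           // flush results that would denormalize (NaN passes through)
  }
  return f * scale;
}

int main(void) {
  // expf(1 - 3) * 0.5 ~= 0.0676676
  printf("%.7f\n", scale_exp_minus_max(1.0f, 3.0f, 0.5f));
  return 0;
}
```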
XNNPACK | XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x24(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
// Load 24 (3x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
input += 24;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
// Store 24 (3x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
| 9,318 | 39.694323 | 116 | c |
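All of the avx2-p5 variants finish with the same masked remainder: the leftover byte count (4 to 28) is subtracted from the address of mask_table[7], which slides an 8-entry window over the table so that exactly the live lanes read -1. The sketch below only mirrors that pointer arithmetic to print the window for each remainder size; it is an illustration, not part of the library.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

int main(void) {
  for (size_t elements = 1; elements <= 7; elements++) {
    const size_t batch = elements * sizeof(float);           // batch is in bytes, as in the kernels
    const int32_t* window = (const int32_t*) ((uintptr_t) &mask_table[7] - batch);
    printf("%zu element(s):", elements);
    for (int lane = 0; lane < 8; lane++) {
      printf(" %d", window[lane] ? 1 : 0);                   // 1 = lane is loaded/stored
    }
    printf("\n");
  }
  return 0;
}
```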
XNNPACK | XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x32(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
// Load 32 (4x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
input += 32;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
// Store 32 (4x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
| 10,146 | 40.416327 | 116 | c |
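The comment block repeated inside every loop compresses the whole approximation into a few identities. Written out, with n the rounded multiple of ln 2 recovered from the magic bias, t the Cody-Waite remainder, and s = 2^n the scale built by the 23-bit shift:

$$
\begin{aligned}
x &= n\ln 2 + t, \qquad n = \operatorname{round}\!\left(\frac{x}{\ln 2}\right), \qquad t \in \left[-\tfrac{\ln 2}{2}, \tfrac{\ln 2}{2}\right],\\
e^{x} &= 2^{n}\,e^{t} = s\,e^{t},\\
e^{t} &\approx 1 + t\bigl(c_1 + t(c_2 + t(c_3 + t(c_4 + t\,c_5)))\bigr) = 1 + t\,p(t),\\
f &= s\bigl(1 + t\,p(t)\bigr) = s + (t\,s)\,p(t),
\end{aligned}
$$

which is why each loop multiplies t by s before the final fused multiply-add, and why the kernel's output is exp(i - max) * scale, lane by lane.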
XNNPACK | XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x40(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
// Load 40 (5x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
input += 40;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
vf4 = _mm256_mul_ps(vf4, vscale);
// Store 40 (5x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
| 10,974 | 41.049808 | 116 | c |
XNNPACK | XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x48(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
// Load 48 (6x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
input += 48;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
vf4 = _mm256_mul_ps(vf4, vscale);
vf5 = _mm256_mul_ps(vf5, vscale);
// Store 48 (6x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
| 11,802 | 41.610108 | 116 | c |
XNNPACK | XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x56(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
// Load 56 (7x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
input += 56;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
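    // Cody-Waite reduction: log(2) is split into a large high part and a small low correction so
    // that the subtraction x - n*log(2) loses far less accuracy to cancellation than a single
    // multiply-subtract with full-precision log(2) would.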
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
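    // _CMP_LT_OS is an ordered compare: NaN lanes produce a zero mask, so the andnot leaves their
    // (NaN) results untouched, while finite lanes below vdenorm_cutoff are forced to +0.0f.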
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
vf4 = _mm256_mul_ps(vf4, vscale);
vf5 = _mm256_mul_ps(vf5, vscale);
vf6 = _mm256_mul_ps(vf6, vscale);
// Store 56 (7x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
    // Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
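
/*
 * Reference note (not part of the generated kernel above): a scalar model of what these
 * vscaleexpminusmax kernels compute per element, i.e. output[i] = scale * exp(input[i] - max),
 * with inputs far below the denormal cutoff flushed to +0.0f. The helper name
 * ref_scaleexpminusmax and the direct call to expf() are illustrative assumptions of this note;
 * the SIMD kernels replace expf() with the magic-bias, Cody-Waite and polynomial steps shown above.
 */
#include <math.h>
#include <stddef.h>

static void ref_scaleexpminusmax(size_t batch, const float* input, float* output,
                                 float scale, float max)
{
  batch /= sizeof(float);  // the kernels receive the batch size in bytes
  for (size_t i = 0; i < batch; i++) {
    const float x = input[i] - max;  // x <= 0 when max is the true maximum of the inputs
    // Mirror the vdenorm_cutoff handling: very negative x is flushed to +0.0f, NaN passes through.
    const float f = (x < -0x1.5D589Ep6f) ? 0.0f : expf(x);
    output[i] = f * scale;
  }
}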
| 12,630 | 42.109215 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x64(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
// Load 64 (8x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
input += 64;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
vf4 = _mm256_mul_ps(vf4, vscale);
vf5 = _mm256_mul_ps(vf5, vscale);
vf6 = _mm256_mul_ps(vf6, vscale);
vf7 = _mm256_mul_ps(vf7, vscale);
// Store 64 (8x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
    // Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
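    // mask_table holds seven -1 words followed by seven zeros; offsetting &mask_table[7] back by
    // the remaining byte count selects a window whose first batch/sizeof(float) entries are -1,
    // so the maskload/maskstore below touch exactly the leftover elements.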
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
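
/*
 * Sketch (for this note only, not generated code): the magic-bias trick used above, in scalar
 * form. Assuming n = round(x * log2(e)) stays in the normalized exponent range -126..0 (which the
 * vdenorm_cutoff test guarantees), adding 0x1.8000FEp23f leaves n + 127 in the low mantissa bits,
 * and shifting the bit pattern left by 23 turns that into the float 2**n. pow2_via_magic_bias is
 * an illustrative name, not an XNNPACK symbol.
 */
#include <stdint.h>
#include <string.h>

static float pow2_via_magic_bias(float x_times_log2e)
{
  float vn = x_times_log2e + 0x1.8000FEp23f;  // rounds the product to an integer n in the low bits
  uint32_t bits;
  memcpy(&bits, &vn, sizeof(bits));           // reinterpret as the IEEE-754 bit pattern
  bits <<= 23;                                // move n + 127 into the exponent field
  float s;
  memcpy(&s, &bits, sizeof(s));               // s == 2**n
  return s;
}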
| 13,458 | 42.556634 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x72(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
// Load 72 (9x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
input += 72;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
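    // Because s is a power of two, t*s merely rescales t without losing precision, and the FMA
    // below then adds s with a single rounding; this is why the factored form s + (t*s)*p is used
    // instead of computing s * (1 + t*p) directly.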
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
vf4 = _mm256_mul_ps(vf4, vscale);
vf5 = _mm256_mul_ps(vf5, vscale);
vf6 = _mm256_mul_ps(vf6, vscale);
vf7 = _mm256_mul_ps(vf7, vscale);
vf8 = _mm256_mul_ps(vf8, vscale);
// Store 72 (9x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
    // Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
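
/*
 * Sketch (for this note only): the range reduction and polynomial evaluation above in scalar
 * form, using the same constants the kernels splat into vminus_ln2_hi/lo and vc1..vc5. It assumes
 * n = round(x / log(2)) and s = 2**n were already produced as in the magic-bias step; exp_reduced
 * is an illustrative name, not an XNNPACK symbol.
 */
static float exp_reduced(float x, float n, float s)
{
  // Cody-Waite reduction: subtract n*log(2) in two pieces so the large high part cancels cleanly.
  float t = n * -0x1.62E43p-1f + x;
  t = n * 0x1.05C61p-29f + t;
  // Degree-5 polynomial for exp(t) on [-log(2)/2, log(2)/2], evaluated with Horner's rule.
  float p = 0x1.0F9F9Cp-7f;          // c5
  p = p * t + 0x1.573A1Ap-5f;        // c4
  p = p * t + 0x1.555A80p-3f;        // c3
  p = p * t + 0x1.FFFDC6p-2f;        // c2
  p = p * t + 0x1.FFFFF6p-1f;        // c1
  // Reconstruct exp(x) ~= s * (1 + t*p), written as s + (t*s)*p as in the kernels.
  t *= s;
  return t * p + s;
}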
| 14,286 | 42.96 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x8(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 (1x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
// Store 8 (1x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
output += 8;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
    // Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
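
/*
 * Sketch (for this note only): the masked-tail pattern shared by these kernels, reduced to a
 * minimal self-contained example. A window into a table of seven -1 words followed by seven 0
 * words yields a per-lane mask, so AVX2 maskload/maskstore touch exactly the 1..7 leftover
 * elements without reading or writing past the buffers. copy_tail and tail_mask_table are
 * illustrative names for this note.
 */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

static const int32_t tail_mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

static void copy_tail(const float* input, float* output, size_t batch)  // batch in bytes, 4..28
{
  const __m256i vmask =
      _mm256_loadu_si256((const __m256i*) ((uintptr_t) &tail_mask_table[7] - batch));
  const __m256 vi = _mm256_maskload_ps(input, vmask);  // disabled lanes read as 0.0f
  _mm256_maskstore_ps(output, vmask, vi);              // disabled lanes are left untouched
}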
| 7,657 | 37.873096 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
// Load 80 (10x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
input += 80;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
vf4 = _mm256_mul_ps(vf4, vscale);
vf5 = _mm256_mul_ps(vf5, vscale);
vf6 = _mm256_mul_ps(vf6, vscale);
vf7 = _mm256_mul_ps(vf7, vscale);
vf8 = _mm256_mul_ps(vf8, vscale);
vf9 = _mm256_mul_ps(vf9, vscale);
// Store 80 (10x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
    // Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
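
/*
 * Usage sketch (illustrative only, not an XNNPACK API): output[i] = scale * exp(input[i] - max)
 * is the middle pass of a numerically stable softmax. A plain-C caller might look like the
 * following, with the reductions written as simple loops and the kernel prototype repeated here
 * so the sketch is self-contained; it assumes n >= 1.
 */
#include <stddef.h>

void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80(
    size_t batch, const float* input, float* output, float scale, float max);

static void softmax_example(size_t n, const float* x, float* y)
{
  float x_max = x[0];
  for (size_t i = 1; i < n; i++) {
    if (x[i] > x_max) x_max = x[i];  // running maximum for the stable-softmax shift
  }
  xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80(n * sizeof(float), x, y, 1.0f, x_max);
  float sum = 0.0f;
  for (size_t i = 0; i < n; i++) {
    sum += y[i];                     // denominator of the softmax
  }
  const float inv_sum = 1.0f / sum;
  for (size_t i = 0; i < n; i++) {
    y[i] *= inv_sum;                 // normalize so the outputs sum to 1
  }
}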
| 15,116 | 43.331378 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x88.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x88(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 88 * sizeof(float); batch -= 88 * sizeof(float)) {
// Load 88 (11x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
const __m256 vi10 = _mm256_loadu_ps(input + 80);
input += 88;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vx10 = _mm256_sub_ps(vi10, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vn10 = _mm256_fmadd_ps(vx10, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn10), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vn10 = _mm256_sub_ps(vn10, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vt10 = _mm256_mul_ps(vt10, vs10);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vf10 = _mm256_fmadd_ps(vt10, vp10, vs10);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf10 = _mm256_andnot_ps(_mm256_cmp_ps(vx10, vdenorm_cutoff, _CMP_LT_OS), vf10);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
vf4 = _mm256_mul_ps(vf4, vscale);
vf5 = _mm256_mul_ps(vf5, vscale);
vf6 = _mm256_mul_ps(vf6, vscale);
vf7 = _mm256_mul_ps(vf7, vscale);
vf8 = _mm256_mul_ps(vf8, vscale);
vf9 = _mm256_mul_ps(vf9, vscale);
vf10 = _mm256_mul_ps(vf10, vscale);
// Store 88 (11x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
_mm256_storeu_ps(output + 80, vf10);
output += 88;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
    // Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
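// Editor's note: the remainder path above loads its 8-lane mask from mask_table so that
// exactly batch/sizeof(float) leading lanes are active: reading 8 int32 words starting at
// (uintptr_t) &mask_table[7] - batch yields batch/4 copies of -1 followed by zeros. A
// standalone scalar sketch of the same idea follows; ref_tail_mask and ref_masked_copy
// are hypothetical names used only for illustration.
#include <stddef.h>
#include <stdint.h>
// Build the mask the kernel would load for `count` remaining floats (1..7):
// the first `count` lanes are all-ones, the rest are zero.
static void ref_tail_mask(int32_t mask[8], size_t count) {
  for (size_t i = 0; i < 8; i++) {
    mask[i] = (i < count) ? (int32_t) -1 : (int32_t) 0;
  }
}
// Emulate _mm256_maskload_ps/_mm256_maskstore_ps semantics: only lanes whose
// mask word has the sign bit set are transferred.
static void ref_masked_copy(float* dst, const float* src, const int32_t mask[8]) {
  for (size_t i = 0; i < 8; i++) {
    if (mask[i] < 0) {
      dst[i] = src[i];
    }
  }
}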
| 15,983 | 43.773109 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx2-p5-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleexpminusmax.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x96(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
// The smallest x for which expf(x) is normalized.
const __m256 vdenorm_cutoff = _mm256_set1_ps(-0x1.5D589Ep6f);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscale = _mm256_set1_ps(scale);
const __m256 vi_max = _mm256_set1_ps(max);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (12x8) inputs at a time.
const __m256 vi0 = _mm256_loadu_ps(input);
const __m256 vi1 = _mm256_loadu_ps(input + 8);
const __m256 vi2 = _mm256_loadu_ps(input + 16);
const __m256 vi3 = _mm256_loadu_ps(input + 24);
const __m256 vi4 = _mm256_loadu_ps(input + 32);
const __m256 vi5 = _mm256_loadu_ps(input + 40);
const __m256 vi6 = _mm256_loadu_ps(input + 48);
const __m256 vi7 = _mm256_loadu_ps(input + 56);
const __m256 vi8 = _mm256_loadu_ps(input + 64);
const __m256 vi9 = _mm256_loadu_ps(input + 72);
const __m256 vi10 = _mm256_loadu_ps(input + 80);
const __m256 vi11 = _mm256_loadu_ps(input + 88);
input += 96;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);
const __m256 vx8 = _mm256_sub_ps(vi8, vi_max);
const __m256 vx9 = _mm256_sub_ps(vi9, vi_max);
const __m256 vx10 = _mm256_sub_ps(vi10, vi_max);
const __m256 vx11 = _mm256_sub_ps(vi11, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vx9, vlog2e, vmagic_bias);
__m256 vn10 = _mm256_fmadd_ps(vx10, vlog2e, vmagic_bias);
__m256 vn11 = _mm256_fmadd_ps(vx11, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn10), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn11), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
vn10 = _mm256_sub_ps(vn10, vmagic_bias);
vn11 = _mm256_sub_ps(vn11, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
vt10 = _mm256_mul_ps(vt10, vs10);
vt11 = _mm256_mul_ps(vt11, vs11);
__m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
__m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
__m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
__m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
__m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
__m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
__m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
__m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);
__m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
__m256 vf9 = _mm256_fmadd_ps(vt9, vp9, vs9);
__m256 vf10 = _mm256_fmadd_ps(vt10, vp10, vs10);
__m256 vf11 = _mm256_fmadd_ps(vt11, vp11, vs11);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vx9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf10 = _mm256_andnot_ps(_mm256_cmp_ps(vx10, vdenorm_cutoff, _CMP_LT_OS), vf10);
vf11 = _mm256_andnot_ps(_mm256_cmp_ps(vx11, vdenorm_cutoff, _CMP_LT_OS), vf11);
// Multiply by scale.
vf0 = _mm256_mul_ps(vf0, vscale);
vf1 = _mm256_mul_ps(vf1, vscale);
vf2 = _mm256_mul_ps(vf2, vscale);
vf3 = _mm256_mul_ps(vf3, vscale);
vf4 = _mm256_mul_ps(vf4, vscale);
vf5 = _mm256_mul_ps(vf5, vscale);
vf6 = _mm256_mul_ps(vf6, vscale);
vf7 = _mm256_mul_ps(vf7, vscale);
vf8 = _mm256_mul_ps(vf8, vscale);
vf9 = _mm256_mul_ps(vf9, vscale);
vf10 = _mm256_mul_ps(vf10, vscale);
vf11 = _mm256_mul_ps(vf11, vscale);
// Store 96 (12x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
_mm256_storeu_ps(output + 80, vf10);
_mm256_storeu_ps(output + 88, vf11);
output += 96;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vi = _mm256_loadu_ps(input);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
    // Store 8 outputs at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vi = _mm256_maskload_ps(input, vmask);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m256 vx = _mm256_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm256_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm256_mul_ps(vt, vs);
__m256 vf = _mm256_fmadd_ps(vt, vp, vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
// Multiply by scale.
vf = _mm256_mul_ps(vf, vscale);
// Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
}
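// Editor's note: a minimal usage sketch for the microkernel defined above. In XNNPACK
// this kernel is normally selected and called by library operator code; invoking it
// directly, as below, is only an illustration and assumes an AVX2+FMA machine with this
// translation unit linked in. compute_max and scale_exp_minus_max are hypothetical helpers.
#include <assert.h>
#include <stddef.h>
static float compute_max(const float* input, size_t n) {
  assert(n != 0);
  float m = input[0];
  for (size_t i = 1; i < n; i++) {
    if (input[i] > m) {
      m = input[i];
    }
  }
  return m;
}
static void scale_exp_minus_max(const float* input, float* output, size_t n, float scale) {
  // The kernel takes the batch size in bytes and the precomputed row maximum.
  const float max = compute_max(input, n);
  xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x96(n * sizeof(float), input, output, scale, max);
}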
| 16,850 | 44.176944 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x112.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x112(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
// Load 112 (7x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
input += 112;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
__m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
__m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
__m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
__m512 vf4 = _mm512_scalef_ps(vp4, vn4);
__m512 vf5 = _mm512_scalef_ps(vp5, vn5);
__m512 vf6 = _mm512_scalef_ps(vp6, vn6);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
vf4 = _mm512_mul_ps(vf4, vscale);
vf5 = _mm512_mul_ps(vf5, vscale);
vf6 = _mm512_mul_ps(vf6, vscale);
// Store 112 (7x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
    // Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
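// Editor's note: unlike the AVX2 kernels earlier in this directory, which rebuild 2**n by
// shifting the magic-biased n into the exponent field, the AVX-512 kernel above
// reconstructs the result with _mm512_scalef_ps(p, n) == p * 2**n. A scalar sketch of the
// same degree-5 scheme, assuming only <math.h>, is shown below; scalar_expf_p5 is a
// hypothetical name.
#include <math.h>
static float scalar_expf_p5(float x) {
  // n := round(x / log(2)); t := x - n * log(2) via Cody-Waite splitting.
  const float n = nearbyintf(x * 0x1.715476p+0f);
  float t = fmaf(n, -0x1.62E43p-1f, x);
  t = fmaf(n, 0x1.05C61p-29f, t);
  // Degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2],
  // using the same coefficients as the kernel above.
  float p = fmaf(0x1.0F9F9Cp-7f, t, 0x1.573A1Ap-5f);
  p = fmaf(p, t, 0x1.555A80p-3f);
  p = fmaf(p, t, 0x1.FFFDC6p-2f);
  p = fmaf(p, t, 0x1.FFFFF6p-1f);
  p = fmaf(p, t, 1.0f);
  // exp(x) == 2**n * p, which is what _mm512_scalef_ps computes per lane.
  return ldexpf(p, (int) n);
}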
| 9,182 | 37.911017 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x128(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
// Load 128 (8x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
input += 128;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
__m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
__m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
__m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
__m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
__m512 vf4 = _mm512_scalef_ps(vp4, vn4);
__m512 vf5 = _mm512_scalef_ps(vp5, vn5);
__m512 vf6 = _mm512_scalef_ps(vp6, vn6);
__m512 vf7 = _mm512_scalef_ps(vp7, vn7);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
vf4 = _mm512_mul_ps(vf4, vscale);
vf5 = _mm512_mul_ps(vf5, vscale);
vf6 = _mm512_mul_ps(vf6, vscale);
vf7 = _mm512_mul_ps(vf7, vscale);
// Store 128 (8x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
    // Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
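// Editor's note: the remainder path above derives its 16-bit lane mask directly from the
// element count rather than loading it from a table as the AVX2 kernels do. The standalone
// sketch below (tail_mask16 is a hypothetical name) shows the same construction that feeds
// _cvtu32_mask16().
#include <stdint.h>
// For count in [1, 15]: a mask whose low `count` bits are set, enabling the
// first `count` of the 16 float lanes in the masked load/store.
static uint16_t tail_mask16(uint32_t count) {
  return (uint16_t) ((UINT32_C(1) << count) - UINT32_C(1));
}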
| 9,808 | 38.393574 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x144.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x144(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
// Load 144 (9x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
input += 144;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
__m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
__m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
__m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
__m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
__m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
__m512 vf4 = _mm512_scalef_ps(vp4, vn4);
__m512 vf5 = _mm512_scalef_ps(vp5, vn5);
__m512 vf6 = _mm512_scalef_ps(vp6, vn6);
__m512 vf7 = _mm512_scalef_ps(vp7, vn7);
__m512 vf8 = _mm512_scalef_ps(vp8, vn8);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
vf4 = _mm512_mul_ps(vf4, vscale);
vf5 = _mm512_mul_ps(vf5, vscale);
vf6 = _mm512_mul_ps(vf6, vscale);
vf7 = _mm512_mul_ps(vf7, vscale);
vf8 = _mm512_mul_ps(vf8, vscale);
// Store 144 (9x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
    // Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
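// Editor's note: the kernel above reduces the argument with two constants,
// -0x1.62E43p-1f and 0x1.05C61p-29f. The first is the float closest to -log(2); the
// second folds back the residual that the first cannot represent, so t = x - n*log(2)
// keeps extra accuracy (Cody-Waite splitting). A scalar sketch of just that step follows;
// reduce_arg_cody_waite is a hypothetical name.
#include <math.h>
static float reduce_arg_cody_waite(float x, float n) {
  // Step 1: subtract n times the high (rounded-to-float) part of log(2).
  float t = fmaf(n, -0x1.62E43p-1f, x);
  // Step 2: fold in n times the low-order residual of -log(2).
  return fmaf(n, 0x1.05C61p-29f, t);
}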
| 10,434 | 38.828244 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x16(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 (1x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
// Store 16 (1x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
output += 16;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
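/*
 * Minimal usage sketch (not part of the generated kernel): one plausible way to drive this micro-kernel
 * when scaling softmax numerators, assuming the caller has already computed the row maximum and the sum
 * of exponentials. Buffer names and the element count are illustrative only.
 *
 *   float x[256], y[256];
 *   // ... fill x, then compute x_max = max(x[i]) and sum = sum over i of exp(x[i] - x_max) ...
 *   xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x16(
 *       256 * sizeof(float), x, y, 1.0f / sum, x_max);
 *   // On return, y[i] == exp(x[i] - x_max) / sum (up to the kernel's approximation error).
 */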
| 5,431 | 33.379747 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x160.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x160(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
// Load 160 (10x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
const __m512 vi9 = _mm512_loadu_ps(input + 144);
input += 160;
// Subtract maximum input x := i - i_max.
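    // Assuming i_max is the true maximum of the inputs, every vx lane is <= 0, so exp(x) lies in (0, 1]
    // and the reconstruction below cannot overflow; this is the usual numerically stable softmax shift.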
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
// Compute reduced argument batch := round(x / log(2)).
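    // vlog2e holds log2(e), so x * vlog2e equals x / log(2); _mm512_roundscale_ps with immediate 0 keeps
    // zero fraction bits and rounds to nearest-even, leaving an integral value in each vn lane.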
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
__m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
__m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
__m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
__m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
__m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
__m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
__m512 vf4 = _mm512_scalef_ps(vp4, vn4);
__m512 vf5 = _mm512_scalef_ps(vp5, vn5);
__m512 vf6 = _mm512_scalef_ps(vp6, vn6);
__m512 vf7 = _mm512_scalef_ps(vp7, vn7);
__m512 vf8 = _mm512_scalef_ps(vp8, vn8);
__m512 vf9 = _mm512_scalef_ps(vp9, vn9);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
vf4 = _mm512_mul_ps(vf4, vscale);
vf5 = _mm512_mul_ps(vf5, vscale);
vf6 = _mm512_mul_ps(vf6, vscale);
vf7 = _mm512_mul_ps(vf7, vscale);
vf8 = _mm512_mul_ps(vf8, vscale);
vf9 = _mm512_mul_ps(vf9, vscale);
// Store 160 (10x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 11,062 | 39.229091 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x176.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x176(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 176 * sizeof(float); batch -= 176 * sizeof(float)) {
// Load 176 (11x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
const __m512 vi9 = _mm512_loadu_ps(input + 144);
const __m512 vi10 = _mm512_loadu_ps(input + 160);
input += 176;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
__m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
__m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
__m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
__m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
__m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
__m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
__m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
__m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
__m512 vf4 = _mm512_scalef_ps(vp4, vn4);
__m512 vf5 = _mm512_scalef_ps(vp5, vn5);
__m512 vf6 = _mm512_scalef_ps(vp6, vn6);
__m512 vf7 = _mm512_scalef_ps(vp7, vn7);
__m512 vf8 = _mm512_scalef_ps(vp8, vn8);
__m512 vf9 = _mm512_scalef_ps(vp9, vn9);
__m512 vf10 = _mm512_scalef_ps(vp10, vn10);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
vf4 = _mm512_mul_ps(vf4, vscale);
vf5 = _mm512_mul_ps(vf5, vscale);
vf6 = _mm512_mul_ps(vf6, vscale);
vf7 = _mm512_mul_ps(vf7, vscale);
vf8 = _mm512_mul_ps(vf8, vscale);
vf9 = _mm512_mul_ps(vf9, vscale);
vf10 = _mm512_mul_ps(vf10, vscale);
// Store 176 (11x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
_mm512_storeu_ps(output + 160, vf10);
output += 176;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 11,719 | 39.694444 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x192.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x192(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
// Load 192 (12x16) inputs at a time.
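    // The 12x16 unroll replicates the same computation across vi0..vi11 so that twelve independent
    // dependency chains are in flight, which helps keep the FMA pipelines busy between loads and stores.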
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
const __m512 vi9 = _mm512_loadu_ps(input + 144);
const __m512 vi10 = _mm512_loadu_ps(input + 160);
const __m512 vi11 = _mm512_loadu_ps(input + 176);
input += 192;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
__m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
__m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
__m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
__m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
__m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
__m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
__m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
__m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
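    // The fused multiply-adds below evaluate the polynomial in Horner form,
    //   p = c0 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))),
    // with c0 = 1, which approximates exp(t) on the reduced interval.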
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
__m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
__m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
__m512 vf4 = _mm512_scalef_ps(vp4, vn4);
__m512 vf5 = _mm512_scalef_ps(vp5, vn5);
__m512 vf6 = _mm512_scalef_ps(vp6, vn6);
__m512 vf7 = _mm512_scalef_ps(vp7, vn7);
__m512 vf8 = _mm512_scalef_ps(vp8, vn8);
__m512 vf9 = _mm512_scalef_ps(vp9, vn9);
__m512 vf10 = _mm512_scalef_ps(vp10, vn10);
__m512 vf11 = _mm512_scalef_ps(vp11, vn11);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
vf4 = _mm512_mul_ps(vf4, vscale);
vf5 = _mm512_mul_ps(vf5, vscale);
vf6 = _mm512_mul_ps(vf6, vscale);
vf7 = _mm512_mul_ps(vf7, vscale);
vf8 = _mm512_mul_ps(vf8, vscale);
vf9 = _mm512_mul_ps(vf9, vscale);
vf10 = _mm512_mul_ps(vf10, vscale);
vf11 = _mm512_mul_ps(vf11, vscale);
// Store 192 (12x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
_mm512_storeu_ps(output + 160, vf10);
_mm512_storeu_ps(output + 176, vf11);
output += 192;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 12,376 | 40.119601 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x32(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
// Load 32 (2x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
input += 32;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
// Store 32 (2x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 6,055 | 34.415205 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x48(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
// Load 48 (3x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
input += 48;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
// Store 48 (3x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 6,679 | 35.304348 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
// Load 64 (4x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
input += 64;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
// Store 64 (4x16) outputs at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
// f = 2**batch * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = 2**batch * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 7,303 | 36.076142 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x80(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
// Load 80 (5x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
input += 80;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    // Compute reduced argument n := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
__m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
// Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
__m512 vf4 = _mm512_scalef_ps(vp4, vn4);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
vf4 = _mm512_mul_ps(vf4, vscale);
// Store 80 (5x16) outputs at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
    // Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 7,927 | 36.752381 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleexpminusmax/gen/f32-vscaleexpminusmax-avx512f-p5-scalef-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleexpminusmax/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleexpminusmax.h>
void xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x96(
size_t batch,
const float* input,
float* output,
float scale,
float max)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscale = _mm512_set1_ps(scale);
const __m512 vi_max = _mm512_set1_ps(max);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (6x16) inputs at a time.
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
input += 96;
// Subtract maximum input x := i - i_max.
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    // Compute reduced argument n := round(x / log(2)).
__m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
__m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
__m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
__m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
__m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
__m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
// Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
__m512 vf0 = _mm512_scalef_ps(vp0, vn0);
__m512 vf1 = _mm512_scalef_ps(vp1, vn1);
__m512 vf2 = _mm512_scalef_ps(vp2, vn2);
__m512 vf3 = _mm512_scalef_ps(vp3, vn3);
__m512 vf4 = _mm512_scalef_ps(vp4, vn4);
__m512 vf5 = _mm512_scalef_ps(vp5, vn5);
// Multiply by scale.
vf0 = _mm512_mul_ps(vf0, vscale);
vf1 = _mm512_mul_ps(vf1, vscale);
vf2 = _mm512_mul_ps(vf2, vscale);
vf3 = _mm512_mul_ps(vf3, vscale);
vf4 = _mm512_mul_ps(vf4, vscale);
vf5 = _mm512_mul_ps(vf5, vscale);
// Store 96 (6x16) outputs at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store 16 outputs at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if (batch != 0) {
    // Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vi = _mm512_mask_loadu_ps(_mm512_undefined_ps(), vmask, input);
// Subtract maximum input x := i - i_max.
const __m512 vx = _mm512_sub_ps(vi, vi_max);
    // Compute reduced argument n := round(x / log(2)).
__m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := x - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Reconstruct the final f value:
    //   f = 2**n * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = 2**n * p
__m512 vf = _mm512_scalef_ps(vp, vn);
// Multiply by scale.
vf = _mm512_mul_ps(vf, vscale);
// Store up to 15 outputs at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
}
| 8,551 | 37.349776 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
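// The remainder path below loads 8 consecutive entries starting (7 - n) entries into this table,
// where n is the number of leftover elements; that yields n words of -1 (lanes kept) followed by
// zeros (lanes masked off).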
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x16(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent such that 2**exponent is considered non-negligible.
  // For smaller exponents, 2**exponent is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 (2x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
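    // Keeping the value and the exponent separate until the final scaling lets the combined exponent
    // (vn + scale_exp) range beyond what a regular float exponent can hold; only the conversion below
    // maps out-of-range results (e <= -127) to zero.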
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
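    // The conversion adds the magic bias 0x1.8000FEp23f (== 1.5 * 2**23 + 127), which leaves the
    // biased exponent e + 127 in the low mantissa bits; shifting the bit pattern left by 23 then
    // moves it into the exponent field, yielding exp2(e) (and exactly 0.0f when e == -127).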
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
// Store 16 (2x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 7,641 | 37.59596 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x24(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent such that 2**exponent is considered non-negligible.
  // For smaller exponents, 2**exponent is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
// Load 24 (3x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
// Store 24 (3x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 8,469 | 38.765258 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x32(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent such that 2**exponent is considered non-negligible.
  // For smaller exponents, 2**exponent is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
// Load 32 (4x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
// Store 32 (4x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 9,297 | 39.780702 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x40(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent such that 2**exponent is considered non-negligible.
  // For smaller exponents, 2**exponent is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
// Load 40 (5x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 vf4 = _mm256_mul_ps(vp4, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
__m256 ve4 = _mm256_add_ps(vn4, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
ve4 = _mm256_max_ps(ve4, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve4, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
vf4 = _mm256_mul_ps(vf4, vs4);
// Store 40 (5x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 10,125 | 40.670782 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x48(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent such that 2**exponent is considered non-negligible.
  // For smaller exponents, 2**exponent is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
// Load 48 (6x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 vf4 = _mm256_mul_ps(vp4, vscalev);
__m256 vf5 = _mm256_mul_ps(vp5, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
__m256 ve4 = _mm256_add_ps(vn4, vscalee);
__m256 ve5 = _mm256_add_ps(vn5, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
ve4 = _mm256_max_ps(ve4, vmin_exponent);
ve5 = _mm256_max_ps(ve5, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve5, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
vf4 = _mm256_mul_ps(vf4, vs4);
vf5 = _mm256_mul_ps(vf5, vs5);
// Store 48 (6x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
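// --- Editor's sketch (not part of the generated kernel above) ---
// The pair _mm256_add_ps(ve, vmagic_bias) followed by _mm256_slli_epi32(..., 23)
// turns a clamped exponent e into the scale factor 2**e without a table lookup.
// A minimal scalar equivalent, assuming e is an integer in [-127, 128) and that
// <stdint.h> is available (the helper name is hypothetical):
#include <stdint.h>
static inline float xnn_sketch_scale_from_exponent(float e) {
  // Adding the magic bias leaves (e + 127) in the low bits of the float's bit
  // pattern; shifting the whole pattern left by 23 moves it into the exponent
  // field, so the reinterpreted float equals 2**e, and exactly 0.0f at e == -127.
  union { float f; uint32_t u; } bits;
  bits.f = e + 0x1.8000FEp23f;
  bits.u <<= 23;
  return bits.f;
}
// --- end of editor's sketch ---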
| 10,953 | 41.457364 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x56(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent e such that 2**e is considered non-negligible.
  // For smaller e, 2**e is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
// Load 56 (7x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 vf4 = _mm256_mul_ps(vp4, vscalev);
__m256 vf5 = _mm256_mul_ps(vp5, vscalev);
__m256 vf6 = _mm256_mul_ps(vp6, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
__m256 ve4 = _mm256_add_ps(vn4, vscalee);
__m256 ve5 = _mm256_add_ps(vn5, vscalee);
__m256 ve6 = _mm256_add_ps(vn6, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
ve4 = _mm256_max_ps(ve4, vmin_exponent);
ve5 = _mm256_max_ps(ve5, vmin_exponent);
ve6 = _mm256_max_ps(ve6, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve6, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
vf4 = _mm256_mul_ps(vf4, vs4);
vf5 = _mm256_mul_ps(vf5, vs5);
vf6 = _mm256_mul_ps(vf6, vs6);
// Store 56 (7x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
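// --- Editor's sketch (illustration only, not part of the generated file) ---
// Per element the kernel computes scale_value * exp(x) * 2**scale_exp, carrying
// the result as a ("mantissa", "exponent") pair so the intermediate exp(x) can
// neither overflow nor underflow. A rough scalar model of the same data flow,
// using libm calls in place of the vectorized polynomial (names hypothetical):
#include <math.h>
static inline float xnn_sketch_scaleextexp_scalar(float x, float scale_value, float scale_exp) {
  const float n = nearbyintf(x * 0x1.715476p+0f);   // n := round(x / log(2))
  float t = fmaf(n, -0x1.62E43p-1f, x);             // Cody-Waite: high part of log(2)
  t = fmaf(n, 0x1.05C61p-29f, t);                   // ... plus the low correction
  const float p = expf(t);                          // stands in for the degree-5 polynomial
  float e = n + scale_exp;                          // accumulated exponent
  if (e < -127.0f) e = -127.0f;                     // clamp, then flush to zero below
  const float s = (e == -127.0f) ? 0.0f : exp2f(e); // scale factor 2**e, or 0
  return (scale_value * p) * s;
}
// --- end of editor's sketch ---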
| 11,781 | 42.157509 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x64(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent e such that 2**e is considered non-negligible.
  // For smaller e, 2**e is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
// Load 64 (8x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 vf4 = _mm256_mul_ps(vp4, vscalev);
__m256 vf5 = _mm256_mul_ps(vp5, vscalev);
__m256 vf6 = _mm256_mul_ps(vp6, vscalev);
__m256 vf7 = _mm256_mul_ps(vp7, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
__m256 ve4 = _mm256_add_ps(vn4, vscalee);
__m256 ve5 = _mm256_add_ps(vn5, vscalee);
__m256 ve6 = _mm256_add_ps(vn6, vscalee);
__m256 ve7 = _mm256_add_ps(vn7, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
ve4 = _mm256_max_ps(ve4, vmin_exponent);
ve5 = _mm256_max_ps(ve5, vmin_exponent);
ve6 = _mm256_max_ps(ve6, vmin_exponent);
ve7 = _mm256_max_ps(ve7, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve7, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
vf4 = _mm256_mul_ps(vf4, vs4);
vf5 = _mm256_mul_ps(vf5, vs5);
vf6 = _mm256_mul_ps(vf6, vs6);
vf7 = _mm256_mul_ps(vf7, vs7);
// Store 64 (8x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
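// --- Editor's sketch: why log(2) is split into two constants ---
// A single float constant for log(2) drops the bits of log(2) below float
// precision, and that truncation error is multiplied by n in t = x - n * log(2).
// Applying a short high part first and then a small low correction with a second
// FMA restores those bits. Minimal restatement (helper names hypothetical):
#include <math.h>
static inline float xnn_sketch_reduce_one_constant(float x, float n) {
  return fmaf(n, -0x1.62E43p-1f, x);                           // high part only
}
static inline float xnn_sketch_reduce_two_constants(float x, float n) {
  return fmaf(n, 0x1.05C61p-29f, fmaf(n, -0x1.62E43p-1f, x));  // high part + low correction
}
// --- end of editor's sketch ---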
| 12,609 | 42.784722 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x72(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent e such that 2**e is considered non-negligible.
  // For smaller e, 2**e is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
// Load 72 (9x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 vf4 = _mm256_mul_ps(vp4, vscalev);
__m256 vf5 = _mm256_mul_ps(vp5, vscalev);
__m256 vf6 = _mm256_mul_ps(vp6, vscalev);
__m256 vf7 = _mm256_mul_ps(vp7, vscalev);
__m256 vf8 = _mm256_mul_ps(vp8, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
__m256 ve4 = _mm256_add_ps(vn4, vscalee);
__m256 ve5 = _mm256_add_ps(vn5, vscalee);
__m256 ve6 = _mm256_add_ps(vn6, vscalee);
__m256 ve7 = _mm256_add_ps(vn7, vscalee);
__m256 ve8 = _mm256_add_ps(vn8, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
ve4 = _mm256_max_ps(ve4, vmin_exponent);
ve5 = _mm256_max_ps(ve5, vmin_exponent);
ve6 = _mm256_max_ps(ve6, vmin_exponent);
ve7 = _mm256_max_ps(ve7, vmin_exponent);
ve8 = _mm256_max_ps(ve8, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve7, vmagic_bias)), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve8, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
vf4 = _mm256_mul_ps(vf4, vs4);
vf5 = _mm256_mul_ps(vf5, vs5);
vf6 = _mm256_mul_ps(vf6, vs6);
vf7 = _mm256_mul_ps(vf7, vs7);
vf8 = _mm256_mul_ps(vf8, vs8);
// Store 72 (9x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
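// --- Editor's sketch: how the remainder mask is formed ---
// mask_table holds seven -1 entries followed by seven 0 entries. "batch" is a
// byte count and each int32_t entry is 4 bytes wide, so subtracting batch bytes
// from &mask_table[7] steps back exactly batch/4 entries, yielding batch/4 lanes
// of all-ones followed by zeros for the masked load/store. Scalar restatement of
// the same pointer arithmetic (helper name hypothetical, assumes <stdint.h>):
#include <stdint.h>
static inline const int32_t* xnn_sketch_tail_mask(const int32_t mask_table_arg[14], size_t batch_bytes) {
  // With batch_bytes in [4, 28], the result points at 1..7 leading -1 entries.
  return (const int32_t*) ((uintptr_t) &mask_table_arg[7] - batch_bytes);
}
// --- end of editor's sketch ---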
| 13,437 | 43.349835 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x8(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent e such that 2**e is considered non-negligible.
  // For smaller e, 2**e is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 (1x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
// Store 8 (1x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
output += 8;
}
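  // Editor's note: in this x8 variant the main tile already equals the vector
  // width, so the generic 8-element loop below can never run (every full group
  // of 8 floats was consumed by the loop above); it is emitted by the shared
  // template and is harmless dead code.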
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
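// --- Editor's sketch: driving the micro-kernel directly ---
// The wrapper below is a hypothetical test harness, not XNNPACK API; it only
// illustrates that the first argument is the input size in bytes (a multiple of
// sizeof(float)), as the asserts above require.
static void xnn_sketch_run_scaleextexp_x8(const float* input, float* output, size_t element_count,
                                          float scale_value, float scale_exp) {
  // element_count floats in, element_count floats out.
  xnn_f32_vscaleextexp_ukernel__avx2_p5_x8(element_count * sizeof(float), input, output, scale_value, scale_exp);
}
// --- end of editor's sketch ---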
| 6,808 | 36.20765 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x80(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent e such that 2**e is considered non-negligible.
  // For smaller e, 2**e is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
// Load 80 (10x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 vf4 = _mm256_mul_ps(vp4, vscalev);
__m256 vf5 = _mm256_mul_ps(vp5, vscalev);
__m256 vf6 = _mm256_mul_ps(vp6, vscalev);
__m256 vf7 = _mm256_mul_ps(vp7, vscalev);
__m256 vf8 = _mm256_mul_ps(vp8, vscalev);
__m256 vf9 = _mm256_mul_ps(vp9, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
__m256 ve4 = _mm256_add_ps(vn4, vscalee);
__m256 ve5 = _mm256_add_ps(vn5, vscalee);
__m256 ve6 = _mm256_add_ps(vn6, vscalee);
__m256 ve7 = _mm256_add_ps(vn7, vscalee);
__m256 ve8 = _mm256_add_ps(vn8, vscalee);
__m256 ve9 = _mm256_add_ps(vn9, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
ve4 = _mm256_max_ps(ve4, vmin_exponent);
ve5 = _mm256_max_ps(ve5, vmin_exponent);
ve6 = _mm256_max_ps(ve6, vmin_exponent);
ve7 = _mm256_max_ps(ve7, vmin_exponent);
ve8 = _mm256_max_ps(ve8, vmin_exponent);
ve9 = _mm256_max_ps(ve9, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve7, vmagic_bias)), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve8, vmagic_bias)), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve9, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
vf4 = _mm256_mul_ps(vf4, vs4);
vf5 = _mm256_mul_ps(vf5, vs5);
vf6 = _mm256_mul_ps(vf6, vs6);
vf7 = _mm256_mul_ps(vf7, vs7);
vf8 = _mm256_mul_ps(vf8, vs8);
vf9 = _mm256_mul_ps(vf9, vs9);
// Store 80 (10x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
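// --- Editor's sketch: why exponents are clamped at -127 ---
// When e == -127 the magic-bias conversion produces a float with exponent field
// 0 and an all-zero mantissa, i.e. exactly 0.0f, so clamping e at -127 first
// makes every result whose combined exponent falls at or below -127 come out as
// zero. That is the "replaced with zero" behaviour noted above. Scalar
// restatement (helper name hypothetical, exp2f from <math.h>):
#include <math.h>
static inline float xnn_sketch_clamped_scale(float e) {
  if (e < -127.0f) e = -127.0f;               // step 1: clamp the minimum exponent
  return (e == -127.0f) ? 0.0f : exp2f(e);    // step 2: -127 maps to a zero scale
}
// --- end of editor's sketch ---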
| 14,267 | 43.867925 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x88.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x88(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent e such that 2**e is considered non-negligible.
  // For smaller e, 2**e is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
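  // Illustrative note: vc1..vc5 are close to the Taylor coefficients of exp(t), i.e. 1, 1/2, 1/6,
  // 1/24 and 1/120 (e.g. 0x1.555A80p-3 ~= 0.16668 ~= 1/6); they are presumably minimax-tuned for
  // the reduced range [-log(2)/2, log(2)/2].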
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 88 * sizeof(float); batch -= 88 * sizeof(float)) {
// Load 88 (11x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
const __m256 vx10 = _mm256_loadu_ps(input + 80);
input += 88;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn10 = _mm256_round_ps(_mm256_mul_ps(vx10, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
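    // Illustrative note: for example, with scale_value = 3.0, scale_exp = 4.0, n = 2.0 and p = 1.5,
    // the product is carried as (mantissa, exponent) = (1.5 * 3.0, 2.0 + 4.0) = (4.5, 6.0),
    // i.e. 4.5 * 2**6 = 288, without ever forming a possibly overflowing intermediate.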
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 vf4 = _mm256_mul_ps(vp4, vscalev);
__m256 vf5 = _mm256_mul_ps(vp5, vscalev);
__m256 vf6 = _mm256_mul_ps(vp6, vscalev);
__m256 vf7 = _mm256_mul_ps(vp7, vscalev);
__m256 vf8 = _mm256_mul_ps(vp8, vscalev);
__m256 vf9 = _mm256_mul_ps(vp9, vscalev);
__m256 vf10 = _mm256_mul_ps(vp10, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
__m256 ve4 = _mm256_add_ps(vn4, vscalee);
__m256 ve5 = _mm256_add_ps(vn5, vscalee);
__m256 ve6 = _mm256_add_ps(vn6, vscalee);
__m256 ve7 = _mm256_add_ps(vn7, vscalee);
__m256 ve8 = _mm256_add_ps(vn8, vscalee);
__m256 ve9 = _mm256_add_ps(vn9, vscalee);
__m256 ve10 = _mm256_add_ps(vn10, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
ve4 = _mm256_max_ps(ve4, vmin_exponent);
ve5 = _mm256_max_ps(ve5, vmin_exponent);
ve6 = _mm256_max_ps(ve6, vmin_exponent);
ve7 = _mm256_max_ps(ve7, vmin_exponent);
ve8 = _mm256_max_ps(ve8, vmin_exponent);
ve9 = _mm256_max_ps(ve9, vmin_exponent);
ve10 = _mm256_max_ps(ve10, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve7, vmagic_bias)), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve8, vmagic_bias)), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve9, vmagic_bias)), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve10, vmagic_bias)), 23));
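    // Illustrative note: vmagic_bias is 12583039.0f (0x1.8000FEp23f). For an integer-valued e in
    // [-127, 127] the sum e + vmagic_bias is exact and the low 9 bits of its significand field hold
    // e + 127, so shifting the bit pattern left by 23 places e + 127 into the exponent field and
    // yields exactly 2**e. The clamp above maps every e <= -127.0 to -127.0, whose biased exponent
    // is 0, so the resulting scale factor is +0.0f as intended.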
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
vf4 = _mm256_mul_ps(vf4, vs4);
vf5 = _mm256_mul_ps(vf5, vs5);
vf6 = _mm256_mul_ps(vf6, vs6);
vf7 = _mm256_mul_ps(vf7, vs7);
vf8 = _mm256_mul_ps(vf8, vs8);
vf9 = _mm256_mul_ps(vf9, vs9);
vf10 = _mm256_mul_ps(vf10, vs10);
// Store 88 (11x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
_mm256_storeu_ps(output + 80, vf10);
output += 88;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
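    // Illustrative note: vlog2e ~= 1.442695 = log2(e), so vn = round(vx / ln(2)) and the reduced
    // argument vt = vx - vn * ln(2) falls in [-log(2)/2, log(2)/2]. ln(2) is represented as
    // -(vminus_ln2_hi + vminus_ln2_lo); the hi part carries few enough significant bits that
    // vn * vminus_ln2_hi is computed exactly, which limits cancellation error in vt.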
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
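    // Illustrative note: the five fused multiply-adds above evaluate the approximation in Horner
    // form, p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))), so that exp(t) ~= p on the reduced
    // range.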
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
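    // Illustrative note: mask_table is seven -1 entries followed by seven 0 entries, and batch here
    // is a byte count; loading 8 consecutive int32 values starting batch bytes before &mask_table[7]
    // produces -1 in exactly the first batch/4 lanes. E.g. 3 leftover floats (batch == 12) give the
    // mask {-1, -1, -1, 0, 0, 0, 0, 0}.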
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 15,130 | 44.438438 | 122 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx2-p5-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vscaleextexp.h>
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};
void xnn_f32_vscaleextexp_ukernel__avx2_p5_x96(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E43p-1f);
const __m256 vminus_ln2_lo = _mm256_set1_ps(0x1.05C61p-29f);
  // The smallest exponent e such that 2**e is considered non-negligible.
  // For smaller e, 2**e is replaced with zero.
const __m256 vmin_exponent = _mm256_set1_ps(-127.0f);
const __m256 vmagic_bias = _mm256_set1_ps(0x1.8000FEp23f);
const __m256 vc0 = _mm256_set1_ps(1.0f);
const __m256 vc1 = _mm256_set1_ps(0x1.FFFFF6p-1f);
const __m256 vc2 = _mm256_set1_ps(0x1.FFFDC6p-2f);
const __m256 vc3 = _mm256_set1_ps(0x1.555A80p-3f);
const __m256 vc4 = _mm256_set1_ps(0x1.573A1Ap-5f);
const __m256 vc5 = _mm256_set1_ps(0x1.0F9F9Cp-7f);
const __m256 vscalev = _mm256_set1_ps(scale_value);
const __m256 vscalee = _mm256_set1_ps(scale_exp);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (12x8) inputs at a time.
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
const __m256 vx10 = _mm256_loadu_ps(input + 80);
const __m256 vx11 = _mm256_loadu_ps(input + 88);
input += 96;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn0 = _mm256_round_ps(_mm256_mul_ps(vx0, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn1 = _mm256_round_ps(_mm256_mul_ps(vx1, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn2 = _mm256_round_ps(_mm256_mul_ps(vx2, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn5 = _mm256_round_ps(_mm256_mul_ps(vx5, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn6 = _mm256_round_ps(_mm256_mul_ps(vx6, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn7 = _mm256_round_ps(_mm256_mul_ps(vx7, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn8 = _mm256_round_ps(_mm256_mul_ps(vx8, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn9 = _mm256_round_ps(_mm256_mul_ps(vx9, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn10 = _mm256_round_ps(_mm256_mul_ps(vx10, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
const __m256 vn11 = _mm256_round_ps(_mm256_mul_ps(vx11, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m256 vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m256 vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm256_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm256_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm256_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm256_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm256_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm256_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm256_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm256_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm256_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm256_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm256_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
__m256 vp10 = _mm256_fmadd_ps(vc5, vt10, vc4);
__m256 vp11 = _mm256_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc1);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm256_fmadd_ps(vp10, vt10, vc0);
vp11 = _mm256_fmadd_ps(vp11, vt11, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m256 vf0 = _mm256_mul_ps(vp0, vscalev);
__m256 vf1 = _mm256_mul_ps(vp1, vscalev);
__m256 vf2 = _mm256_mul_ps(vp2, vscalev);
__m256 vf3 = _mm256_mul_ps(vp3, vscalev);
__m256 vf4 = _mm256_mul_ps(vp4, vscalev);
__m256 vf5 = _mm256_mul_ps(vp5, vscalev);
__m256 vf6 = _mm256_mul_ps(vp6, vscalev);
__m256 vf7 = _mm256_mul_ps(vp7, vscalev);
__m256 vf8 = _mm256_mul_ps(vp8, vscalev);
__m256 vf9 = _mm256_mul_ps(vp9, vscalev);
__m256 vf10 = _mm256_mul_ps(vp10, vscalev);
__m256 vf11 = _mm256_mul_ps(vp11, vscalev);
__m256 ve0 = _mm256_add_ps(vn0, vscalee);
__m256 ve1 = _mm256_add_ps(vn1, vscalee);
__m256 ve2 = _mm256_add_ps(vn2, vscalee);
__m256 ve3 = _mm256_add_ps(vn3, vscalee);
__m256 ve4 = _mm256_add_ps(vn4, vscalee);
__m256 ve5 = _mm256_add_ps(vn5, vscalee);
__m256 ve6 = _mm256_add_ps(vn6, vscalee);
__m256 ve7 = _mm256_add_ps(vn7, vscalee);
__m256 ve8 = _mm256_add_ps(vn8, vscalee);
__m256 ve9 = _mm256_add_ps(vn9, vscalee);
__m256 ve10 = _mm256_add_ps(vn10, vscalee);
__m256 ve11 = _mm256_add_ps(vn11, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
// This replacement is done in two steps:
// 1. Clamp minimum e at -127.0.
// 2. Map e to scale factor 0.0 when e == -127.0
ve0 = _mm256_max_ps(ve0, vmin_exponent);
ve1 = _mm256_max_ps(ve1, vmin_exponent);
ve2 = _mm256_max_ps(ve2, vmin_exponent);
ve3 = _mm256_max_ps(ve3, vmin_exponent);
ve4 = _mm256_max_ps(ve4, vmin_exponent);
ve5 = _mm256_max_ps(ve5, vmin_exponent);
ve6 = _mm256_max_ps(ve6, vmin_exponent);
ve7 = _mm256_max_ps(ve7, vmin_exponent);
ve8 = _mm256_max_ps(ve8, vmin_exponent);
ve9 = _mm256_max_ps(ve9, vmin_exponent);
ve10 = _mm256_max_ps(ve10, vmin_exponent);
ve11 = _mm256_max_ps(ve11, vmin_exponent);
// Convert exponents into scale factors:
// - s = exp2(e) when e > -127.0
// - s = 0.0 when e <= -127.0
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve0, vmagic_bias)), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve1, vmagic_bias)), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve2, vmagic_bias)), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve3, vmagic_bias)), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve4, vmagic_bias)), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve5, vmagic_bias)), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve6, vmagic_bias)), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve7, vmagic_bias)), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve8, vmagic_bias)), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve9, vmagic_bias)), 23));
const __m256 vs10 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve10, vmagic_bias)), 23));
const __m256 vs11 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve11, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf0 = _mm256_mul_ps(vf0, vs0);
vf1 = _mm256_mul_ps(vf1, vs1);
vf2 = _mm256_mul_ps(vf2, vs2);
vf3 = _mm256_mul_ps(vf3, vs3);
vf4 = _mm256_mul_ps(vf4, vs4);
vf5 = _mm256_mul_ps(vf5, vs5);
vf6 = _mm256_mul_ps(vf6, vs6);
vf7 = _mm256_mul_ps(vf7, vs7);
vf8 = _mm256_mul_ps(vf8, vs8);
vf9 = _mm256_mul_ps(vf9, vs9);
vf10 = _mm256_mul_ps(vf10, vs10);
vf11 = _mm256_mul_ps(vf11, vs11);
// Store 96 (12x8) outputs at a time.
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
_mm256_storeu_ps(output + 80, vf10);
_mm256_storeu_ps(output + 88, vf11);
output += 96;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 inputs at a time.
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
// Store 8 results at a time.
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
// Load up to 7 inputs at a time.
const __m256 vx = _mm256_maskload_ps(input, vmask);
    // Compute reduced argument n := round(input / log(2)).
const __m256 vn = _mm256_round_ps(_mm256_mul_ps(vx, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vp = _mm256_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m256 vf = _mm256_mul_ps(vp, vscalev);
__m256 ve = _mm256_add_ps(vn, vscalee);
// For computational efficiency, replace exp2(e) with 0.0f when e <= -127.0.
ve = _mm256_max_ps(ve, vmin_exponent);
// Convert exponents into scale factors.
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(_mm256_add_ps(ve, vmagic_bias)), 23));
// Multiply "mantissa" by the scale factor.
vf = _mm256_mul_ps(vf, vs);
    // Store up to 7 outputs at a time.
_mm256_maskstore_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 15,993 | 44.95977 | 122 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x112.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x112(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
// Load 112 (7x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
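    // Illustrative note: _mm512_roundscale_ps(x, 0) rounds each lane to an integral value; the
    // immediate 0 requests 0 fraction bits and round-to-nearest-even, matching the role of
    // _mm256_round_ps(..., _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) in the AVX2 kernels above.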
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
__m512 vf4 = _mm512_mul_ps(vp4, vscalev);
__m512 vf5 = _mm512_mul_ps(vp5, vscalev);
__m512 vf6 = _mm512_mul_ps(vp6, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
const __m512 ve4 = _mm512_add_ps(vn4, vscalee);
const __m512 ve5 = _mm512_add_ps(vn5, vscalee);
const __m512 ve6 = _mm512_add_ps(vn6, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
vf4 = _mm512_scalef_ps(vf4, ve4);
vf5 = _mm512_scalef_ps(vf5, ve5);
vf6 = _mm512_scalef_ps(vf6, ve6);
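    // Illustrative note: _mm512_scalef_ps(a, b) computes a * 2**floor(b) per lane, so AVX-512 can
    // apply the "exponent" part directly; the explicit clamp and magic-bias construction of the
    // scale factor used in the AVX2 kernels above is not needed here.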
    // Store 112 (7x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
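    // Illustrative note: after the shift, batch is the number of leftover floats (1..15), so
    // (1 << batch) - 1 sets the low batch bits of the mask; e.g. 5 leftover elements give the mask
    // 0b0000000000011111.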
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 9,186 | 38.943478 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x128.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x128(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
// Load 128 (8x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
__m512 vf4 = _mm512_mul_ps(vp4, vscalev);
__m512 vf5 = _mm512_mul_ps(vp5, vscalev);
__m512 vf6 = _mm512_mul_ps(vp6, vscalev);
__m512 vf7 = _mm512_mul_ps(vp7, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
const __m512 ve4 = _mm512_add_ps(vn4, vscalee);
const __m512 ve5 = _mm512_add_ps(vn5, vscalee);
const __m512 ve6 = _mm512_add_ps(vn6, vscalee);
const __m512 ve7 = _mm512_add_ps(vn7, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
vf4 = _mm512_scalef_ps(vf4, ve4);
vf5 = _mm512_scalef_ps(vf5, ve5);
vf6 = _mm512_scalef_ps(vf6, ve6);
vf7 = _mm512_scalef_ps(vf7, ve7);
// Store 128 (8x16) results at a time.
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 9,820 | 39.415638 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x144.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x144(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
// Load 144 (9x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
input += 144;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
__m512 vf4 = _mm512_mul_ps(vp4, vscalev);
__m512 vf5 = _mm512_mul_ps(vp5, vscalev);
__m512 vf6 = _mm512_mul_ps(vp6, vscalev);
__m512 vf7 = _mm512_mul_ps(vp7, vscalev);
__m512 vf8 = _mm512_mul_ps(vp8, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
const __m512 ve4 = _mm512_add_ps(vn4, vscalee);
const __m512 ve5 = _mm512_add_ps(vn5, vscalee);
const __m512 ve6 = _mm512_add_ps(vn6, vscalee);
const __m512 ve7 = _mm512_add_ps(vn7, vscalee);
const __m512 ve8 = _mm512_add_ps(vn8, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
vf4 = _mm512_scalef_ps(vf4, ve4);
vf5 = _mm512_scalef_ps(vf5, ve5);
vf6 = _mm512_scalef_ps(vf6, ve6);
vf7 = _mm512_scalef_ps(vf7, ve7);
vf8 = _mm512_scalef_ps(vf8, ve8);
    // Store 144 (9x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
output += 144;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 10,454 | 39.839844 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x16(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 (1x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
    // Store 16 (1x16) results at a time.
    _mm512_storeu_ps(output, vf0);
output += 16;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
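// Added illustration (not part of the generated kernel): a minimal scalar sketch of what one
// lane of the vector code above computes. It assumes fmaf()/rintf()/floorf()/ldexpf() from
// <math.h> as scalar stand-ins for the FMA, rounding and scalef intrinsics, and the function
// name is purely illustrative. Coefficients are copied from the vector constants above.
#include <math.h>
static inline float scaleextexp_scalar_sketch(float x, float scale_value, float scale_exp)
{
  const float n = rintf(x * 0x1.715476p+0f); // round(x / ln(2))
  float t = fmaf(n, -0x1.62E43p-1f, x); // Cody-Waite reduction, high part
  t = fmaf(n, 0x1.05C61p-29f, t); // Cody-Waite reduction, low part
  float p = fmaf(0x1.0F9F9Cp-7f, t, 0x1.573A1Ap-5f); // degree-5 polynomial, Horner form
  p = fmaf(p, t, 0x1.555A80p-3f);
  p = fmaf(p, t, 0x1.FFFDC6p-2f);
  p = fmaf(p, t, 0x1.FFFFF6p-1f);
  p = fmaf(p, t, 1.0f);
  // exp(x) * scale_value * 2^scale_exp == (p * scale_value) * 2^(n + scale_exp);
  // ldexpf with floorf mimics scalef, which multiplies by 2^floor(exponent).
  return ldexpf(p * scale_value, (int) floorf(n + scale_exp));
}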
| 5,388 | 34.453947 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x160.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x160(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
// Load 160 (10x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
input += 160;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
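    // Added numeric example (not from the generator): (av = 1.5, ae = 3) times (bv = 2.0, be = -1)
    // gives (av*bv = 3.0, ae+be = 2), i.e. 2^2 * 3.0 = 12.0, which matches
    // (2^3 * 1.5) * (2^-1 * 2.0) = 12.0 * 1.0.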
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
__m512 vf4 = _mm512_mul_ps(vp4, vscalev);
__m512 vf5 = _mm512_mul_ps(vp5, vscalev);
__m512 vf6 = _mm512_mul_ps(vp6, vscalev);
__m512 vf7 = _mm512_mul_ps(vp7, vscalev);
__m512 vf8 = _mm512_mul_ps(vp8, vscalev);
__m512 vf9 = _mm512_mul_ps(vp9, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
const __m512 ve4 = _mm512_add_ps(vn4, vscalee);
const __m512 ve5 = _mm512_add_ps(vn5, vscalee);
const __m512 ve6 = _mm512_add_ps(vn6, vscalee);
const __m512 ve7 = _mm512_add_ps(vn7, vscalee);
const __m512 ve8 = _mm512_add_ps(vn8, vscalee);
const __m512 ve9 = _mm512_add_ps(vn9, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
vf4 = _mm512_scalef_ps(vf4, ve4);
vf5 = _mm512_scalef_ps(vf5, ve5);
vf6 = _mm512_scalef_ps(vf6, ve6);
vf7 = _mm512_scalef_ps(vf7, ve7);
vf8 = _mm512_scalef_ps(vf8, ve8);
vf9 = _mm512_scalef_ps(vf9, ve9);
    // Store 160 (10x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
output += 160;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 11,089 | 40.226766 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x176.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x176(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 176 * sizeof(float); batch -= 176 * sizeof(float)) {
// Load 176 (11x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
const __m512 vx10 = _mm512_loadu_ps(input + 160);
input += 176;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
__m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
__m512 vf4 = _mm512_mul_ps(vp4, vscalev);
__m512 vf5 = _mm512_mul_ps(vp5, vscalev);
__m512 vf6 = _mm512_mul_ps(vp6, vscalev);
__m512 vf7 = _mm512_mul_ps(vp7, vscalev);
__m512 vf8 = _mm512_mul_ps(vp8, vscalev);
__m512 vf9 = _mm512_mul_ps(vp9, vscalev);
__m512 vf10 = _mm512_mul_ps(vp10, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
const __m512 ve4 = _mm512_add_ps(vn4, vscalee);
const __m512 ve5 = _mm512_add_ps(vn5, vscalee);
const __m512 ve6 = _mm512_add_ps(vn6, vscalee);
const __m512 ve7 = _mm512_add_ps(vn7, vscalee);
const __m512 ve8 = _mm512_add_ps(vn8, vscalee);
const __m512 ve9 = _mm512_add_ps(vn9, vscalee);
const __m512 ve10 = _mm512_add_ps(vn10, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
vf4 = _mm512_scalef_ps(vf4, ve4);
vf5 = _mm512_scalef_ps(vf5, ve5);
vf6 = _mm512_scalef_ps(vf6, ve6);
vf7 = _mm512_scalef_ps(vf7, ve7);
vf8 = _mm512_scalef_ps(vf8, ve8);
vf9 = _mm512_scalef_ps(vf9, ve9);
vf10 = _mm512_scalef_ps(vf10, ve10);
    // Store 176 (11x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
_mm512_storeu_ps(output + 160, vf10);
output += 176;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 11,754 | 40.684397 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x192.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x192(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
// Load 192 (12x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
const __m512 vx6 = _mm512_loadu_ps(input + 96);
const __m512 vx7 = _mm512_loadu_ps(input + 112);
const __m512 vx8 = _mm512_loadu_ps(input + 128);
const __m512 vx9 = _mm512_loadu_ps(input + 144);
const __m512 vx10 = _mm512_loadu_ps(input + 160);
const __m512 vx11 = _mm512_loadu_ps(input + 176);
input += 192;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_hi, vx6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_hi, vx7);
__m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
__m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_hi, vx9);
__m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_hi, vx10);
__m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_hi, vx11);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
vt6 = _mm512_fmadd_ps(vn6, vminus_ln2_lo, vt6);
vt7 = _mm512_fmadd_ps(vn7, vminus_ln2_lo, vt7);
vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
vt9 = _mm512_fmadd_ps(vn9, vminus_ln2_lo, vt9);
vt10 = _mm512_fmadd_ps(vn10, vminus_ln2_lo, vt10);
vt11 = _mm512_fmadd_ps(vn11, vminus_ln2_lo, vt11);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
__m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
__m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
__m512 vf4 = _mm512_mul_ps(vp4, vscalev);
__m512 vf5 = _mm512_mul_ps(vp5, vscalev);
__m512 vf6 = _mm512_mul_ps(vp6, vscalev);
__m512 vf7 = _mm512_mul_ps(vp7, vscalev);
__m512 vf8 = _mm512_mul_ps(vp8, vscalev);
__m512 vf9 = _mm512_mul_ps(vp9, vscalev);
__m512 vf10 = _mm512_mul_ps(vp10, vscalev);
__m512 vf11 = _mm512_mul_ps(vp11, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
const __m512 ve4 = _mm512_add_ps(vn4, vscalee);
const __m512 ve5 = _mm512_add_ps(vn5, vscalee);
const __m512 ve6 = _mm512_add_ps(vn6, vscalee);
const __m512 ve7 = _mm512_add_ps(vn7, vscalee);
const __m512 ve8 = _mm512_add_ps(vn8, vscalee);
const __m512 ve9 = _mm512_add_ps(vn9, vscalee);
const __m512 ve10 = _mm512_add_ps(vn10, vscalee);
const __m512 ve11 = _mm512_add_ps(vn11, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
vf4 = _mm512_scalef_ps(vf4, ve4);
vf5 = _mm512_scalef_ps(vf5, ve5);
vf6 = _mm512_scalef_ps(vf6, ve6);
vf7 = _mm512_scalef_ps(vf7, ve7);
vf8 = _mm512_scalef_ps(vf8, ve8);
vf9 = _mm512_scalef_ps(vf9, ve9);
vf10 = _mm512_scalef_ps(vf10, ve10);
vf11 = _mm512_scalef_ps(vf11, ve11);
    // Store 192 (12x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
_mm512_storeu_ps(output + 160, vf10);
_mm512_storeu_ps(output + 176, vf11);
output += 192;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 12,419 | 41.101695 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
// Load 32 (2x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
    // Store 32 (2x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 6,020 | 35.490909 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
// Load 48 (3x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
    // Store 48 (3x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 6,652 | 36.376404 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
// Load 64 (4x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
    // Store 64 (4x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
// Compute reduced argument batch := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
// Compute reduced argument t := input - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 7,284 | 37.141361 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
// Load 80 (5x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
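    // _mm512_roundscale_ps(..., 0) rounds each element to the nearest integer (ties to even), so each vnN is an integral float.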
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
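    // Numeric example: (exp2(3) * 1.5) * (exp2(-1) * 0.5) = exp2(3 - 1) * 0.75 = 3.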
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
__m512 vf4 = _mm512_mul_ps(vp4, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
const __m512 ve4 = _mm512_add_ps(vn4, vscalee);
// Multiply "mantissa" by the exp2("exponent").
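    // _mm512_scalef_ps(a, b) computes a * exp2(floor(b)), so out-of-range exponents saturate naturally to 0 or +/-Inf instead of requiring manual bit manipulation.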
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
vf4 = _mm512_scalef_ps(vf4, ve4);
    // Store 80 (5x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
    // Prepare a mask for the valid 32-bit elements (count depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 7,916 | 37.808824 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vscaleextexp/gen/f32-vscaleextexp-avx512f-p5-scalef-x96.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vscaleextexp/avx512f-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vscaleextexp.h>
void xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96(
size_t batch,
const float* input,
float* output,
float scale_value,
float scale_exp)
{
assert(batch % sizeof(float) == 0);
const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);
const __m512 vc0 = _mm512_set1_ps(1.0f);
const __m512 vc1 = _mm512_set1_ps(0x1.FFFFF6p-1f);
const __m512 vc2 = _mm512_set1_ps(0x1.FFFDC6p-2f);
const __m512 vc3 = _mm512_set1_ps(0x1.555A80p-3f);
const __m512 vc4 = _mm512_set1_ps(0x1.573A1Ap-5f);
const __m512 vc5 = _mm512_set1_ps(0x1.0F9F9Cp-7f);
const __m512 vscalev = _mm512_set1_ps(scale_value);
const __m512 vscalee = _mm512_set1_ps(scale_exp);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
// Load 96 (6x16) inputs at a time.
const __m512 vx0 = _mm512_loadu_ps(input);
const __m512 vx1 = _mm512_loadu_ps(input + 16);
const __m512 vx2 = _mm512_loadu_ps(input + 32);
const __m512 vx3 = _mm512_loadu_ps(input + 48);
const __m512 vx4 = _mm512_loadu_ps(input + 64);
const __m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_hi, vx0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_hi, vx1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_hi, vx2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_hi, vx3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_hi, vx4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_hi, vx5);
vt0 = _mm512_fmadd_ps(vn0, vminus_ln2_lo, vt0);
vt1 = _mm512_fmadd_ps(vn1, vminus_ln2_lo, vt1);
vt2 = _mm512_fmadd_ps(vn2, vminus_ln2_lo, vt2);
vt3 = _mm512_fmadd_ps(vn3, vminus_ln2_lo, vt3);
vt4 = _mm512_fmadd_ps(vn4, vminus_ln2_lo, vt4);
vt5 = _mm512_fmadd_ps(vn5, vminus_ln2_lo, vt5);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
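    // vpN now holds the Horner evaluation c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))) ~= exp(tN).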
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation where
// - vnX is "exponent"
// - vpX is "mantissa"
//
// exp2(ae) * av * exp2(be) * bv =
// = exp2(ae + be) * (av * bv)
__m512 vf0 = _mm512_mul_ps(vp0, vscalev);
__m512 vf1 = _mm512_mul_ps(vp1, vscalev);
__m512 vf2 = _mm512_mul_ps(vp2, vscalev);
__m512 vf3 = _mm512_mul_ps(vp3, vscalev);
__m512 vf4 = _mm512_mul_ps(vp4, vscalev);
__m512 vf5 = _mm512_mul_ps(vp5, vscalev);
const __m512 ve0 = _mm512_add_ps(vn0, vscalee);
const __m512 ve1 = _mm512_add_ps(vn1, vscalee);
const __m512 ve2 = _mm512_add_ps(vn2, vscalee);
const __m512 ve3 = _mm512_add_ps(vn3, vscalee);
const __m512 ve4 = _mm512_add_ps(vn4, vscalee);
const __m512 ve5 = _mm512_add_ps(vn5, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf0 = _mm512_scalef_ps(vf0, ve0);
vf1 = _mm512_scalef_ps(vf1, ve1);
vf2 = _mm512_scalef_ps(vf2, ve2);
vf3 = _mm512_scalef_ps(vf3, ve3);
vf4 = _mm512_scalef_ps(vf4, ve4);
vf5 = _mm512_scalef_ps(vf5, ve5);
    // Store 96 (6x16) results at a time.
    _mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
// Load 16 inputs at a time.
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store 16 results at a time.
_mm512_storeu_ps(output, vf);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
    // Prepare a mask for the valid 32-bit elements (count depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
// Load up to 15 inputs at a time.
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
    // Compute reduced argument n := round(input / log(2)).
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    // Compute reduced argument t := input - n * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);
vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
// Multiply "extended" floating-point numbers in ("mantissa", "exponent") representation.
__m512 vf = _mm512_mul_ps(vp, vscalev);
const __m512 ve = _mm512_add_ps(vn, vscalee);
// Multiply "mantissa" by the exp2("exponent").
vf = _mm512_scalef_ps(vf, ve);
// Store up to 15 results at a time.
_mm512_mask_storeu_ps(output, vmask, vf);
}
_mm256_zeroupper();
}
| 8,548 | 38.396313 | 106 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut2048-p1-div-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
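// The table stores 2**(-k/2048) for k = 0..2047; it supplies the fractional part of the exponential reconstructed below.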
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut2048_p1_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
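  // Outline: with z = |x|, the loop below computes f = exp(-z) / (1 + exp(-z)) = sigmoid(-z), flushes f to zero once z exceeds denorm_cutoff, and finally selects f for x < 0 or 1 - f for x >= 0.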
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
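    // After the magic-bias trick the low mantissa bits of vn hold n * 2048 as a fixed-point value (n ~= -z / ln(2)): the low 11 bits (masked with 0x7FF below) index the table, and shifting left by 12 moves the integer part of n into the float exponent field.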
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
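    // tN = zN + nN * ln(2) is the range-reduction residual; since nN is quantized to multiples of 1/2048, |tN| <= ln(2)/4096, which is why a degree-1 polynomial (the _p1 suffix) is sufficient below.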
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 9,654 | 47.034826 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut2048-p1-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut2048_p1_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(vyCDEF, vdCDEF);
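    // AArch64 NEON has a full-precision vector FDIV, so the sigmoid denominator is divided directly (the _div suffix) rather than approximated with a reciprocal estimate.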
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 11,293 | 49.419643 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut2048-p1-div-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut2048_p1_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
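    // Table gather pattern: each 64-bit lane of vidx packs two 32-bit indices; vld1_dup_f32 fills both halves with the entry for the low index, then vld1_lane_f32 overwrites lane 1 with the entry for the high index.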
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(vyCDEF, vdCDEF);
float32x4_t vfGHIJ = vdivq_f32(vyGHIJ, vdGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 12,932 | 51.360324 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut2048-p1-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut2048_p1_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 12);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 12);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 12);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vp89AB = vmulq_f32(vt89AB, vc1);
const float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc1);
const float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc1);
const float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vfmaq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(vyCDEF, vdCDEF);
float32x4_t vfGHIJ = vdivq_f32(vyGHIJ, vdGHIJ);
float32x4_t vfKLMN = vdivq_f32(vyKLMN, vdKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
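  // Final 1-3 elements: compute a full 4-lane vector and store only the valid low lanes.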
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 14,571 | 52.97037 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut2048-p1-div-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut2048_p1_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
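  // Computation outline:
  //   1. z := |x|; sigmoid(x) = exp(-z) / (1 + exp(-z)), reflected to 1 - f for x >= 0.
  //   2. exp(-z) ~= s * (1 + c1 * t), where s = 2**n is rebuilt from the 2048-entry table plus an
  //      exponent adjustment, and t = z + n * ln(2) is the reduced argument (n rounded via the magic bias).
  //   3. For |x| > denorm_cutoff the result is flushed to exactly 0 (x < 0) or 1 (x > 0).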
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 4,646 | 40.123894 | 101 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut2048-p1-div-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut2048-p1.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut2048_p1_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.ln2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut2048_p1.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 12);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 12);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
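    // NEON has no vector gather: each index vector is read back as two 64-bit lane pairs and the
    // table entries are fetched with scalar loads (vld1_dup fills lane 0, vld1_lane fills lane 1).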
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
const float32x4_t vp0123 = vmulq_f32(vt0123, vc1);
const float32x4_t vp4567 = vmulq_f32(vt4567, vc1);
const float32x4_t vy0123 = vfmaq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmaq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
const float32x4_t vp = vmulq_f32(vt, vc1);
const float32x4_t vy = vfmaq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,012 | 44.016854 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut64-p2-div-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut64_p2_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
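  // Same scheme as the lut2048-p1 kernels, but with a 64-entry table (6 index bits, exponent shift
  // of 17) and a degree-2 polynomial: exp(-z) ~= s * (1 - p) with p = t - c2 * t**2, t = z + n * ln(2).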
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
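    // Shifting the magic-biased n left by 17 (23 mantissa bits - 6 index bits) moves the integer
    // part of n into the floating-point exponent field; it is added to the table entry below.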
    // Use the low 6 bits of n, as an integer, as an index into the 64-entry table of 2**(-k/64) values.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 9,875 | 46.480769 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut64-p2-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut64_p2_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
    // Use the low 6 bits of n, as an integer, as an index into the 64-entry table of 2**(-k/64) values.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(vyCDEF, vdCDEF);
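    // f = exp(-z) / (1 + exp(-z)) = sigmoid(-|x|); the sign of x selects f or 1 - f below.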
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
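    // Where |x| > denorm_cutoff the masked f is forced to 0, so the final result saturates to
    // exactly 0 (large negative x) or 1 (large positive x).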
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 11,548 | 48.780172 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut64-p2-div-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut64_p2_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
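  // Main loop: 20 elements (five 4-lane vectors) per iteration; the wide unroll presumably helps
  // overlap the scalar table lookups with the divisions (the unroll factor is set by the generator).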
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
    // Use the low 6 bits of n, as an integer, as an index into the 64-entry table of 2**(-k/64) values.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(vyCDEF, vdCDEF);
float32x4_t vfGHIJ = vdivq_f32(vyGHIJ, vdGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
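    // Reflect: sigmoid(x) = f when x < 0, and 1 - f when x >= 0.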
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 13,221 | 50.648438 | 113 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut64-p2-div-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut64_p2_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 17);
const int32x4_t veKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 17);
    // Use the low 6 bits of n, as an integer, as an index into the 64-entry table of 2**(-k/64) values.
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxIJ]);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
float32x2_t vlKL = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxKL]);
float32x2_t vlMN = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxMN]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
vlKL = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxKL >> 32)], vlKL, 1);
vlMN = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxMN >> 32)], vlMN, 1);
const float32x4_t vlKLMN = vcombine_f32(vlKL, vlMN);
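    // The twelve 2-lane lookups above (vld1_dup for lane 0, vld1_lane for lane 1) have been
    // recombined into six 4-lane vectors of 2**(-k/64) table values.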
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlKLMN), veKLMN));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
float32x4_t vpKLMN = vmulq_f32(vtKLMN, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmsq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmsq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vfmsq_f32(vtKLMN, vpKLMN, vtKLMN);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);
const float32x4_t vyCDEF = vfmsq_f32(vsCDEF, vsCDEF, vpCDEF);
const float32x4_t vyGHIJ = vfmsq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
const float32x4_t vyKLMN = vfmsq_f32(vsKLMN, vsKLMN, vpKLMN);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(vyGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(vyKLMN, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(vyCDEF, vdCDEF);
float32x4_t vfGHIJ = vdivq_f32(vyGHIJ, vdGHIJ);
float32x4_t vfKLMN = vdivq_f32(vyKLMN, vdKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 14,894 | 52.196429 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut64-p2-div-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut64_p2_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
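// Explanatory note on the loop below (a high-level sketch of what the generated code does):
// sigmoid(x) is evaluated as f = e / (e + 1) with e := exp(-|x|). n = |x|*minus_log2e + magic_bias
// rounds the scaled argument via the magic-bias trick; the 6 low bits of n's bit pattern select an
// entry of the 64-entry exp2 table, while the remaining bits (shifted left by 17) supply the
// power-of-two scale folded into that entry to form s. A degree-2 polynomial in the reduced
// argument t = |x| + n*ln2 then refines s into e. Results are flushed to 0 once |x| exceeds
// denorm_cutoff, and the final select returns f for negative x and 1 - f otherwise.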
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
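// Remainder of 1-3 elements: a full vector of 4 floats is still loaded (the kernel is declared
// XNN_OOB_READS, so reading past the end of the batch is permitted); only the valid lanes are
// written back through the 2-element and 1-element stores below.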
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 4,663 | 39.556522 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-lut64-p2-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_lut64_p2_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_log2e);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
// Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(n % 64).
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmsq_f32(vt, vp, vt);
const float32x4_t vy = vfmsq_f32(vs, vs, vp);
const float32x4_t vd = vaddq_f32(vy, vone);
float32x4_t vf = vdivq_f32(vy, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 8,199 | 43.565217 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-p5-div-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_p5_div_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
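// Explanatory note on the loop below: sigmoid(x) is again evaluated as e / (e + 1) with
// e := exp(-|x|), but without a lookup table. The power-of-two scale s is reconstructed by
// shifting the magic-bias-rounded scale n into the floating-point exponent field, and a
// degree-5 polynomial in the reduced argument t (Horner evaluation with coefficients c1..c5)
// supplies the correction factor. Flushing past denorm_cutoff and the final x < 0 select
// mirror the table-based kernels.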
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
float32x4_t vf0123 = vdivq_f32(ve0123, vd0123);
float32x4_t vf4567 = vdivq_f32(ve4567, vd4567);
float32x4_t vf89AB = vdivq_f32(ve89AB, vd89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 6,887 | 38.815029 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-p5-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
float32x4_t vf0123 = vdivq_f32(ve0123, vd0123);
float32x4_t vf4567 = vdivq_f32(ve4567, vd4567);
float32x4_t vf89AB = vdivq_f32(ve89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(veCDEF, vdCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 7,968 | 40.722513 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-p5-div-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_p5_div_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
float32x4_t vf0123 = vdivq_f32(ve0123, vd0123);
float32x4_t vf4567 = vdivq_f32(ve4567, vd4567);
float32x4_t vf89AB = vdivq_f32(ve89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(veCDEF, vdCDEF);
float32x4_t vfGHIJ = vdivq_f32(veGHIJ, vdGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 9,049 | 42.301435 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-p5-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
const float32x4_t vz89AB = vabsq_f32(vx89AB);
const float32x4_t vzCDEF = vabsq_f32(vxCDEF);
const float32x4_t vzGHIJ = vabsq_f32(vxGHIJ);
const float32x4_t vzKLMN = vabsq_f32(vxKLMN);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vminus_log2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vminus_log2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
const float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
float32x4_t vpKLMN = vfmaq_f32(vc4, vc5, vtKLMN);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc3, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc2, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc1, vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t ve89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
const float32x4_t veCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
const float32x4_t veGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t veKLMN = vfmaq_f32(vsKLMN, vpKLMN, vtKLMN);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
const float32x4_t vd89AB = vaddq_f32(ve89AB, vone);
const float32x4_t vdCDEF = vaddq_f32(veCDEF, vone);
const float32x4_t vdGHIJ = vaddq_f32(veGHIJ, vone);
const float32x4_t vdKLMN = vaddq_f32(veKLMN, vone);
float32x4_t vf0123 = vdivq_f32(ve0123, vd0123);
float32x4_t vf4567 = vdivq_f32(ve4567, vd4567);
float32x4_t vf89AB = vdivq_f32(ve89AB, vd89AB);
float32x4_t vfCDEF = vdivq_f32(veCDEF, vdCDEF);
float32x4_t vfGHIJ = vdivq_f32(veGHIJ, vdGHIJ);
float32x4_t vfKLMN = vdivq_f32(veKLMN, vdKLMN);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcagtq_f32(vxGHIJ, vdenorm_cutoff)));
vfKLMN = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfKLMN), vcagtq_f32(vxKLMN, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));
vfGHIJ = vbslq_f32(vmGHIJ, vfGHIJ, vsubq_f32(vone, vfGHIJ));
vfKLMN = vbslq_f32(vmKLMN, vfKLMN, vsubq_f32(vone, vfKLMN));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vst1q_f32(output, vfKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 10,130 | 43.629956 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-p5-div-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_p5_div_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 3,553 | 34.54 | 101 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-aarch64-neonfma-rr1-p5-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__aarch64_neonfma_rr1_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_log2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_log2e);
const float32x4_t vln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vone = vmovq_n_f32(1.0f);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vabsq_f32(vx0123);
const float32x4_t vz4567 = vabsq_f32(vx4567);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
const float32x4_t ve0123 = vfmaq_f32(vs0123, vp0123, vt0123);
const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);
const float32x4_t vd0123 = vaddq_f32(ve0123, vone);
const float32x4_t vd4567 = vaddq_f32(ve4567, vone);
float32x4_t vf0123 = vdivq_f32(ve0123, vd0123);
float32x4_t vf4567 = vdivq_f32(ve4567, vd4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
vst1q_f32(output, vf); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vabsq_f32(vx);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
const float32x4_t ve = vfmaq_f32(vs, vp, vt);
const float32x4_t vd = vaddq_f32(ve, vone);
float32x4_t vf = vdivq_f32(ve, vd);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
}
}
}
| 5,803 | 36.445161 | 113 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
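  // Per-element evaluation scheme (rr2-p5):
  //   z := -|x|  (ORing in the sign mask forces the sign bit on, so exp(z) cannot overflow)
  //   e := exp(z), reconstructed as s * (1 + t * p(t)) with s = 2**n and a degree-5 polynomial p
  //   f := e / (1 + e) = sigmoid(z)
  //   sigmoid(x) = f if x < 0, and 1 - f otherwise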
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
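    // s := 2**n.  AVX1 lacks 256-bit integer shifts, so each 128-bit half of the magic-biased n
    // is shifted left by 23 into the exponent field separately and the halves are re-joined.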
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
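    // Reduced argument t := z - n * ln2, using a two-constant (hi + lo) Cody-Waite reduction.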
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
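    // Reconstruct e := s * (1 + t * p(t)) ≈ exp(z), then f := e / (e + 1) = sigmoid(z).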
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
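    // Inputs below the denormal cutoff would underflow exp(z): flush f to zero.  Then reflect
    // for x >= 0 (sigmoid(x) = 1 - sigmoid(-x)); blendv selects on the sign bit of x.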
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 7,368 | 38.61828 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
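  // Same rr2-p5 sigmoid evaluation as the x16 variant above, unrolled to 24 elements per main-loop iteration.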
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 8,645 | 40.970874 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
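  // Same rr2-p5 sigmoid evaluation as above, unrolled to 32 elements per main-loop iteration.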
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 9,922 | 42.90708 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
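  // Same rr2-p5 sigmoid evaluation as above, unrolled to 40 elements per main-loop iteration.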
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 11,199 | 44.528455 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
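  // Same rr2-p5 sigmoid evaluation as above, unrolled to 48 elements per main-loop iteration.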
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 12,476 | 45.906015 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
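  // Same rr2-p5 sigmoid evaluation as above, unrolled to 56 elements per main-loop iteration.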
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
__m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
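    // For x >= 0, apply the identity sigmoid(x) = 1 - sigmoid(-x) by blending on the sign bit of x.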
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
output += 56;
}
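  // Remainder loop: process any remaining full 8-element vectors with the same evaluation.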
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
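  // Tail: 1-7 leftover elements are loaded with a mask and stored in 4-, 2- and 1-element pieces.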
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 13,753 | 47.090909 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
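  // Evaluation outline (shared by the kernels in this family):
  //   z = -|x|           (sign_mask is assumed to hold the IEEE sign bit, as its name suggests)
  //   n = round(z/ln2)   (via z*log2(e) + magic_bias)
  //   s = 2**n;  t = z - n*ln2 (ln2 split into hi/lo parts)
  //   exp(z) ~= s*(1 + t*p(t)) with a degree-5 polynomial p
  //   sigmoid(x) = exp(z)/(1 + exp(z)), mirrored to 1 - f for x >= 0.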
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
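    // n = round(z/ln2): adding the magic bias places the rounded integer in the low mantissa bits.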
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);
__m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias);
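    // s = 2**n: shift the low bits of n into the exponent field; AVX1 integer shifts are 128-bit
    // wide, so the two halves of each vector are processed separately and reassembled.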
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);
const __m128 vs_lo7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23));
const __m128 vs_hi7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn7, 1)), 23));
const __m256 vs7 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo7), vs_hi7, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
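    // t = z - n*ln2, applied in two steps with the hi/lo split of ln2 (Cody-Waite) for extra accuracy.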
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);
__m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_hi), vz7);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);
vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_lo), vt7);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
__m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);
__m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc5, vt7), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);
const __m256 ve7 = _mm256_add_ps(_mm256_mul_ps(vt7, vp7), vs7);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
__m256 vf7 = _mm256_div_ps(ve7, vd7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 15,030 | 48.120915 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
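  // Same RR2-P5 evaluation, unrolled to nine 8-element vectors (72 floats) per main-loop iteration.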
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);
__m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias);
__m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);
const __m128 vs_lo7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23));
const __m128 vs_hi7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn7, 1)), 23));
const __m256 vs7 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo7), vs_hi7, 1);
const __m128 vs_lo8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23));
const __m128 vs_hi8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn8, 1)), 23));
const __m256 vs8 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo8), vs_hi8, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);
__m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_hi), vz7);
__m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_hi), vz8);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);
vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_lo), vt7);
vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_lo), vt8);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
__m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);
__m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc5, vt7), vc4);
__m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc5, vt8), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc2);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc1);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);
const __m256 ve7 = _mm256_add_ps(_mm256_mul_ps(vt7, vp7), vs7);
const __m256 ve8 = _mm256_add_ps(_mm256_mul_ps(vt8, vp8), vs8);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
__m256 vf7 = _mm256_div_ps(ve7, vd7);
__m256 vf8 = _mm256_div_ps(ve8, vd8);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 16,307 | 49.02454 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
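  // Single-vector variant: one 8-element vector per main-loop iteration; the masked tail below covers 1-7 leftovers.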
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 4,700 | 36.608 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
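  // Unrolled to ten 8-element vectors (80 floats) per main-loop iteration.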
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);
__m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias);
__m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias);
__m256 vn9 = _mm256_add_ps(_mm256_mul_ps(vz9, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);
const __m128 vs_lo7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23));
const __m128 vs_hi7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn7, 1)), 23));
const __m256 vs7 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo7), vs_hi7, 1);
const __m128 vs_lo8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23));
const __m128 vs_hi8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn8, 1)), 23));
const __m256 vs8 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo8), vs_hi8, 1);
const __m128 vs_lo9 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23));
const __m128 vs_hi9 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn9, 1)), 23));
const __m256 vs9 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo9), vs_hi9, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);
__m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_hi), vz7);
__m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_hi), vz8);
__m256 vt9 = _mm256_add_ps(_mm256_mul_ps(vn9, vminus_ln2_hi), vz9);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);
vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_lo), vt7);
vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_lo), vt8);
vt9 = _mm256_add_ps(_mm256_mul_ps(vn9, vminus_ln2_lo), vt9);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
__m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);
__m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc5, vt7), vc4);
__m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc5, vt8), vc4);
__m256 vp9 = _mm256_add_ps(_mm256_mul_ps(vc5, vt9), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc3);
vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc2);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc2);
vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc1);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc1);
vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);
const __m256 ve7 = _mm256_add_ps(_mm256_mul_ps(vt7, vp7), vs7);
const __m256 ve8 = _mm256_add_ps(_mm256_mul_ps(vt8, vp8), vs8);
const __m256 ve9 = _mm256_add_ps(_mm256_mul_ps(vt9, vp9), vs9);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
const __m256 vd9 = _mm256_add_ps(ve9, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
__m256 vf7 = _mm256_div_ps(ve7, vd7);
__m256 vf8 = _mm256_div_ps(ve8, vd8);
__m256 vf9 = _mm256_div_ps(ve9, vd9);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vz9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
vf9 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf9), vf9, vx9);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 17,584 | 49.823699 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
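    // t = z - n*ln2, with ln2 split into hi/lo parts so the first step stays exact
    // (Cody-Waite argument reduction).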
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
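    // Horner evaluation of p(t); exp(z) is then reassembled below as s + (s*t)*p(t).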
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
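    // Reciprocal of the denominator d = e + 1: start from _mm256_rcp_ps and refine
    // with two Newton-Raphson steps r <- r*(2 - r*d), each roughly doubling the
    // number of correct bits.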
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
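    // Flush the result to 0 where z is so negative that exp(z) would be denormal,
    // then use the sign of x to select f (x < 0) or 1 - f (x >= 0); blendv keys off
    // the sign bit of vx.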
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
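    // Load a lane mask that is all-ones for the first batch/sizeof(float) lanes and
    // zero elsewhere, so the masked load only reads the valid elements.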
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
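// Usage sketch (illustrative only, not part of the generated kernel): the caller passes
// the batch size in bytes and a params struct initialized for the avx_rr2_p5 variant.
// The initializer name below is an assumption made for the example.
//
//   float in[128], out[128];
//   union xnn_f32_sigmoid_params p;
//   xnn_init_f32_sigmoid_avx_rr2_p5_params(&p);  // assumed helper name
//   xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x16(sizeof(in), in, out, &p);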
| 8,168 | 39.641791 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
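  // Same sigmoid algorithm as the narrower variants; the main loop below is unrolled to 3 vectors of 8 floats.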
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 9,634 | 42.013393 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
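  // Same sigmoid algorithm as the narrower variants; the main loop below is unrolled to 4 vectors of 8 floats.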
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 11,100 | 43.94332 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
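  // Same sigmoid algorithm as the narrower variants; the main loop below is unrolled to 5 vectors of 8 floats.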
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 12,566 | 45.544444 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
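  // Same sigmoid algorithm as the narrower variants; the main loop below is unrolled to 6 vectors of 8 floats.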
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 14,032 | 46.894198 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
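  // Same sigmoid algorithm as the narrower variants; the main loop below is unrolled to 7 vectors of 8 floats.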
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
__m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 15,498 | 48.047468 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
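  // Algorithm outline (rr2_p5): each lane computes sigmoid(x) as e / (1 + e)
  // with e = exp(z) for z = -|x|. exp(z) is evaluated as s * (1 + t*p(t)):
  // n = round(z * log2(e)) via the magic-bias trick, s = 2**n rebuilt from the
  // integer bits of vn, t = z - n*ln(2) via a two-term (hi/lo) reduction, and
  // p(t) a degree-5 polynomial. 1 / (1 + e) is refined from an rcp estimate by
  // two Newton-Raphson steps, and the sign of x is handled by a final blend.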
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
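    // z := -|x|: setting the sign bit makes every lane non-positive, so exp(z)
    // below cannot overflow.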
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
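    // n := round(z * log2(e)): adding the magic bias rounds the product and
    // leaves the integer result in the low bits of vn's binary representation.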
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);
__m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias);
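    // s := 2**n, rebuilt per 128-bit half (AVX1 lacks 256-bit integer shifts):
    // shifting vn's low bits left by 23 moves the biased exponent into place.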
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);
const __m128 vs_lo7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23));
const __m128 vs_hi7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn7, 1)), 23));
const __m256 vs7 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo7), vs_hi7, 1);
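    // Subtract the magic bias to recover n as a float.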
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
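    // t := z - n*ln(2), with ln(2) split into hi/lo parts (Cody-Waite) to keep
    // the reduced argument accurate.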
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);
__m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_hi), vz7);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);
vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_lo), vt7);
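    // Evaluate the degree-5 polynomial exp(t) ~= 1 + t*p(t) with Horner's
    // scheme, p(t) = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).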
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
__m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);
__m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc5, vt7), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc1);
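    // e := exp(z) ~= s * (1 + t*p(t)), reassociated as s + (t*s)*p(t).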
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);
const __m256 ve7 = _mm256_add_ps(_mm256_mul_ps(vt7, vp7), vs7);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
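    // r := 1 / (1 + e): start from the rcp estimate and refine it with two
    // Newton-Raphson iterations r <- r * (2 - r*d).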
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
vr7 = _mm256_mul_ps(vr7, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr7, vd7)));
vr7 = _mm256_mul_ps(vr7, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr7, vd7)));
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
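    // For z below the denormal cutoff exp(z) underflows: force f to 0 so the
    // result saturates (to 0, or to 1 after the sign blend below).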
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
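    // f was computed for z = -|x|; for non-negative x select 1 - f instead
    // (blendv picks f only where the sign bit of x is set).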
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
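    // Remainder of 1..7 elements: load a per-length mask from mask_table so a
    // single masked load covers the tail.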
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
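    // Store the remaining 1..7 results in chunks of 4, 2, and 1 floats.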
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 16,964 | 49.044248 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);
__m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias);
__m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);
const __m128 vs_lo7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23));
const __m128 vs_hi7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn7, 1)), 23));
const __m256 vs7 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo7), vs_hi7, 1);
const __m128 vs_lo8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23));
const __m128 vs_hi8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn8, 1)), 23));
const __m256 vs8 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo8), vs_hi8, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);
__m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_hi), vz7);
__m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_hi), vz8);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);
vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_lo), vt7);
vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_lo), vt8);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
__m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);
__m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc5, vt7), vc4);
__m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc5, vt8), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc2);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc1);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);
const __m256 ve7 = _mm256_add_ps(_mm256_mul_ps(vt7, vp7), vs7);
const __m256 ve8 = _mm256_add_ps(_mm256_mul_ps(vt8, vp8), vs8);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
__m256 vr8 = _mm256_rcp_ps(vd8);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
vr7 = _mm256_mul_ps(vr7, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr7, vd7)));
vr7 = _mm256_mul_ps(vr7, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr7, vd7)));
vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8)));
vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8)));
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
__m256 vf8 = _mm256_mul_ps(ve8, vr8);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 18,430 | 49.914365 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 5,120 | 37.795455 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx-rr2-p5-nr2-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx_rr2_p5_nr2_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx_rr2_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p5.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p5.minus_ln2_lo);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx_rr2_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p5.one);
const __m256 vtwo = _mm256_load_ps(params->avx_rr2_p5.two);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx_rr2_p5.denorm_cutoff);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 vn6 = _mm256_add_ps(_mm256_mul_ps(vz6, vlog2e), vmagic_bias);
__m256 vn7 = _mm256_add_ps(_mm256_mul_ps(vz7, vlog2e), vmagic_bias);
__m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias);
__m256 vn9 = _mm256_add_ps(_mm256_mul_ps(vz9, vlog2e), vmagic_bias);
const __m128 vs_lo0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs_hi0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
const __m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo0), vs_hi0, 1);
const __m128 vs_lo1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs_hi1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
const __m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo1), vs_hi1, 1);
const __m128 vs_lo2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs_hi2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
const __m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo2), vs_hi2, 1);
const __m128 vs_lo3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs_hi3 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
const __m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo3), vs_hi3, 1);
const __m128 vs_lo4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs_hi4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
const __m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo4), vs_hi4, 1);
const __m128 vs_lo5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs_hi5 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
const __m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo5), vs_hi5, 1);
const __m128 vs_lo6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn6)), 23));
const __m128 vs_hi6 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn6, 1)), 23));
const __m256 vs6 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo6), vs_hi6, 1);
const __m128 vs_lo7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn7)), 23));
const __m128 vs_hi7 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn7, 1)), 23));
const __m256 vs7 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo7), vs_hi7, 1);
const __m128 vs_lo8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23));
const __m128 vs_hi8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn8, 1)), 23));
const __m256 vs8 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo8), vs_hi8, 1);
const __m128 vs_lo9 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn9)), 23));
const __m128 vs_hi9 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn9, 1)), 23));
const __m256 vs9 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo9), vs_hi9, 1);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_hi), vz6);
__m256 vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_hi), vz7);
__m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_hi), vz8);
__m256 vt9 = _mm256_add_ps(_mm256_mul_ps(vn9, vminus_ln2_hi), vz9);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
vt6 = _mm256_add_ps(_mm256_mul_ps(vn6, vminus_ln2_lo), vt6);
vt7 = _mm256_add_ps(_mm256_mul_ps(vn7, vminus_ln2_lo), vt7);
vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_lo), vt8);
vt9 = _mm256_add_ps(_mm256_mul_ps(vn9, vminus_ln2_lo), vt9);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc5, vt0), vc4);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc5, vt1), vc4);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc5, vt2), vc4);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc5, vt3), vc4);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc5, vt4), vc4);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc5, vt5), vc4);
__m256 vp6 = _mm256_add_ps(_mm256_mul_ps(vc5, vt6), vc4);
__m256 vp7 = _mm256_add_ps(_mm256_mul_ps(vc5, vt7), vc4);
__m256 vp8 = _mm256_add_ps(_mm256_mul_ps(vc5, vt8), vc4);
__m256 vp9 = _mm256_add_ps(_mm256_mul_ps(vc5, vt9), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc3);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc3);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc3);
vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc2);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc2);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc2);
vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc2);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc1);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc1);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc1);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc1);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc1);
vp6 = _mm256_add_ps(_mm256_mul_ps(vp6, vt6), vc1);
vp7 = _mm256_add_ps(_mm256_mul_ps(vp7, vt7), vc1);
vp8 = _mm256_add_ps(_mm256_mul_ps(vp8, vt8), vc1);
vp9 = _mm256_add_ps(_mm256_mul_ps(vp9, vt9), vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
const __m256 ve0 = _mm256_add_ps(_mm256_mul_ps(vt0, vp0), vs0);
const __m256 ve1 = _mm256_add_ps(_mm256_mul_ps(vt1, vp1), vs1);
const __m256 ve2 = _mm256_add_ps(_mm256_mul_ps(vt2, vp2), vs2);
const __m256 ve3 = _mm256_add_ps(_mm256_mul_ps(vt3, vp3), vs3);
const __m256 ve4 = _mm256_add_ps(_mm256_mul_ps(vt4, vp4), vs4);
const __m256 ve5 = _mm256_add_ps(_mm256_mul_ps(vt5, vp5), vs5);
const __m256 ve6 = _mm256_add_ps(_mm256_mul_ps(vt6, vp6), vs6);
const __m256 ve7 = _mm256_add_ps(_mm256_mul_ps(vt7, vp7), vs7);
const __m256 ve8 = _mm256_add_ps(_mm256_mul_ps(vt8, vp8), vs8);
const __m256 ve9 = _mm256_add_ps(_mm256_mul_ps(vt9, vp9), vs9);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
const __m256 vd9 = _mm256_add_ps(ve9, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
__m256 vr2 = _mm256_rcp_ps(vd2);
__m256 vr3 = _mm256_rcp_ps(vd3);
__m256 vr4 = _mm256_rcp_ps(vd4);
__m256 vr5 = _mm256_rcp_ps(vd5);
__m256 vr6 = _mm256_rcp_ps(vd6);
__m256 vr7 = _mm256_rcp_ps(vd7);
__m256 vr8 = _mm256_rcp_ps(vd8);
__m256 vr9 = _mm256_rcp_ps(vd9);
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr0 = _mm256_mul_ps(vr0, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr0, vd0)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr1 = _mm256_mul_ps(vr1, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr1, vd1)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr2 = _mm256_mul_ps(vr2, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr2, vd2)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr3 = _mm256_mul_ps(vr3, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr3, vd3)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr4 = _mm256_mul_ps(vr4, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr4, vd4)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr5 = _mm256_mul_ps(vr5, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr5, vd5)));
vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
vr6 = _mm256_mul_ps(vr6, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr6, vd6)));
vr7 = _mm256_mul_ps(vr7, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr7, vd7)));
vr7 = _mm256_mul_ps(vr7, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr7, vd7)));
vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8)));
vr8 = _mm256_mul_ps(vr8, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr8, vd8)));
vr9 = _mm256_mul_ps(vr9, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr9, vd9)));
vr9 = _mm256_mul_ps(vr9, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr9, vd9)));
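    // Reconstruct sigmoid(z) = e / d as e * r.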
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
__m256 vf2 = _mm256_mul_ps(ve2, vr2);
__m256 vf3 = _mm256_mul_ps(ve3, vr3);
__m256 vf4 = _mm256_mul_ps(ve4, vr4);
__m256 vf5 = _mm256_mul_ps(ve5, vr5);
__m256 vf6 = _mm256_mul_ps(ve6, vr6);
__m256 vf7 = _mm256_mul_ps(ve7, vr7);
__m256 vf8 = _mm256_mul_ps(ve8, vr8);
__m256 vf9 = _mm256_mul_ps(ve9, vr9);
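    // Flush the result to +0.0f where z is below the denormal cutoff (the exponential underflows).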
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vz9, vdenorm_cutoff, _CMP_LT_OS), vf9);
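    // For x >= 0, sigmoid(x) = 1 - sigmoid(-x); for x < 0 the value is already sigmoid(x).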
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
vf9 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf9), vf9, vx9);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
const __m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc5, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_add_ps(_mm256_mul_ps(vt, vp), vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
vr = _mm256_mul_ps(vr, _mm256_sub_ps(vtwo, _mm256_mul_ps(vr, vd)));
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 19,896 | 50.680519 | 119 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
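    // Operate on z := -|x| so that the exponential never overflows.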
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
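    // n := round(z / ln2), computed with the "magic bias" trick; shifting the low
    // bits of n into the exponent field reconstructs the scale factor s := 2**n.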
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
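    // Reduced argument t := z - n * ln2.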
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
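    // Polynomial p := c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))), evaluated with Horner's scheme.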
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
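    // Reconstruct e := exp(z) as s + (t * s) * p.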
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
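    // sigmoid(z) = e / (1 + e).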
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
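    // Flush the result to +0.0f where z is below the denormal cutoff.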
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
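    // For x >= 0, sigmoid(x) = 1 - sigmoid(-x); for x < 0 the value is already sigmoid(x).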
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 5,796 | 33.301775 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
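  // Per 8-float vector: z := -|x|; exp(z) is reconstructed as s * (1 + t * p(t)), where
  // s = 2**round(z / ln2), t is the reduced argument, and 1 + t * p(t) is a degree-5
  // polynomial approximation of exp(t); then sigmoid(z) = exp(z) / (1 + exp(z)), and the
  // sign of x selects either sigmoid(z) or 1 - sigmoid(z).
  // The main loop below processes 24 elements (3 vectors) per iteration.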
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 6,695 | 35 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
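  // Same exp/sigmoid approximation as the narrower variants; the main loop below
  // processes 32 elements (4 vectors) per iteration.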
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 7,594 | 36.413793 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
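  // Same exp/sigmoid approximation as the narrower variants; the main loop below
  // processes 40 elements (5 vectors) per iteration.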
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 8,493 | 37.609091 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
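  // Same exp/sigmoid approximation as the narrower variants; the main loop below
  // processes 48 elements (6 vectors) per iteration.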
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 9,392 | 38.632911 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
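  // Same exp/sigmoid approximation as the narrower variants; the main loop below
  // processes 56 elements (7 vectors) per iteration.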
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 10,291 | 39.519685 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
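  // Same exp/sigmoid approximation as the narrower variants; the main loop below
  // processes 64 elements (8 vectors) per iteration.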
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
__m256 vf7 = _mm256_div_ps(ve7, vd7);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 11,190 | 40.295203 | 120 |
c
|
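Editor's note (not part of the dataset row above): a minimal scalar reference for the AVX2 sigmoid kernels in this dump, useful for sanity-checking their output. It mirrors the numerically stable formulation the vectorized code uses (z = -|x|, f = e^z / (1 + e^z), then 1 - f for positive inputs) but calls libm's expf instead of the kernels' degree-5 polynomial, so outputs agree only up to the approximation error. The function name f32_vsigmoid_reference is ours, not an XNNPACK symbol.

#include <math.h>
#include <stddef.h>

// Scalar model of the vectorized sigmoid kernels: batch is in bytes,
// exactly as the ukernels expect.
static void f32_vsigmoid_reference(size_t batch, const float* input, float* output)
{
  batch /= sizeof(float);
  for (size_t i = 0; i < batch; i++) {
    const float x = input[i];
    const float z = -fabsf(x);      // z := -|x|, so expf(z) never overflows
    const float e = expf(z);        // e := exp(z)
    float f = e / (e + 1.0f);       // f := sigmoid(-|x|)
    if (x > 0.0f) {
      f = 1.0f - f;                 // reflect for positive inputs
    }
    output[i] = f;
  }
}

A test harness could run this next to any of the ukernels in this dump on the same buffer and compare the two outputs within a small tolerance.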
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 ve8 = _mm256_fmadd_ps(vt8, vp8, vs8);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
__m256 vf7 = _mm256_div_ps(ve7, vd7);
__m256 vf8 = _mm256_div_ps(ve8, vd8);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 12,089 | 40.979167 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(input);
    input += 8;
    // z := -|x|, so that exp(z) never overflows.
    const __m256 vz = _mm256_or_ps(vx, vsign_mask);
    // n := round(z / ln(2)), computed with the magic-bias trick.
    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    // s := 2**n, built by shifting the low bits of n into the exponent field.
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);
    // t := z - n * ln(2), the reduced argument.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
    // Degree-5 polynomial: exp(t) ~= 1 + t * p(t).
    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);
    // e := exp(z) = s * (1 + t * p) = s + (t * s) * p.
    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
    // f := e / (e + 1) = sigmoid(-|x|).
    const __m256 vd = _mm256_add_ps(ve, vone);
    __m256 vf = _mm256_div_ps(ve, vd);
    // Flush to zero where exp(z) underflows, then select 1 - f for positive x.
    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
    _mm256_storeu_ps(output, vf);
    output += 8;
  }
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 3,885 | 32.791304 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-div-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
const __m256 vx2 = _mm256_loadu_ps(input + 16);
const __m256 vx3 = _mm256_loadu_ps(input + 24);
const __m256 vx4 = _mm256_loadu_ps(input + 32);
const __m256 vx5 = _mm256_loadu_ps(input + 40);
const __m256 vx6 = _mm256_loadu_ps(input + 48);
const __m256 vx7 = _mm256_loadu_ps(input + 56);
const __m256 vx8 = _mm256_loadu_ps(input + 64);
const __m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);
const __m256 vz4 = _mm256_or_ps(vx4, vsign_mask);
const __m256 vz5 = _mm256_or_ps(vx5, vsign_mask);
const __m256 vz6 = _mm256_or_ps(vx6, vsign_mask);
const __m256 vz7 = _mm256_or_ps(vx7, vsign_mask);
const __m256 vz8 = _mm256_or_ps(vx8, vsign_mask);
const __m256 vz9 = _mm256_or_ps(vx9, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
const __m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
__m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
__m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
__m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
__m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
__m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
__m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);
__m256 vp8 = _mm256_fmadd_ps(vc5, vt8, vc4);
__m256 vp9 = _mm256_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
vt2 = _mm256_mul_ps(vt2, vs2);
vt3 = _mm256_mul_ps(vt3, vs3);
vt4 = _mm256_mul_ps(vt4, vs4);
vt5 = _mm256_mul_ps(vt5, vs5);
vt6 = _mm256_mul_ps(vt6, vs6);
vt7 = _mm256_mul_ps(vt7, vs7);
vt8 = _mm256_mul_ps(vt8, vs8);
vt9 = _mm256_mul_ps(vt9, vs9);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);
const __m256 ve4 = _mm256_fmadd_ps(vt4, vp4, vs4);
const __m256 ve5 = _mm256_fmadd_ps(vt5, vp5, vs5);
const __m256 ve6 = _mm256_fmadd_ps(vt6, vp6, vs6);
const __m256 ve7 = _mm256_fmadd_ps(vt7, vp7, vs7);
const __m256 ve8 = _mm256_fmadd_ps(vt8, vp8, vs8);
const __m256 ve9 = _mm256_fmadd_ps(vt9, vp9, vs9);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
const __m256 vd2 = _mm256_add_ps(ve2, vone);
const __m256 vd3 = _mm256_add_ps(ve3, vone);
const __m256 vd4 = _mm256_add_ps(ve4, vone);
const __m256 vd5 = _mm256_add_ps(ve5, vone);
const __m256 vd6 = _mm256_add_ps(ve6, vone);
const __m256 vd7 = _mm256_add_ps(ve7, vone);
const __m256 vd8 = _mm256_add_ps(ve8, vone);
const __m256 vd9 = _mm256_add_ps(ve9, vone);
__m256 vf0 = _mm256_div_ps(ve0, vd0);
__m256 vf1 = _mm256_div_ps(ve1, vd1);
__m256 vf2 = _mm256_div_ps(ve2, vd2);
__m256 vf3 = _mm256_div_ps(ve3, vd3);
__m256 vf4 = _mm256_div_ps(ve4, vd4);
__m256 vf5 = _mm256_div_ps(ve5, vd5);
__m256 vf6 = _mm256_div_ps(ve6, vd6);
__m256 vf7 = _mm256_div_ps(ve7, vd7);
__m256 vf8 = _mm256_div_ps(ve8, vd8);
__m256 vf9 = _mm256_div_ps(ve9, vd9);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);
vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vz4, vdenorm_cutoff, _CMP_LT_OS), vf4);
vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vz5, vdenorm_cutoff, _CMP_LT_OS), vf5);
vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vz6, vdenorm_cutoff, _CMP_LT_OS), vf6);
vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vz7, vdenorm_cutoff, _CMP_LT_OS), vf7);
vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
vf9 = _mm256_andnot_ps(_mm256_cmp_ps(vz9, vdenorm_cutoff, _CMP_LT_OS), vf9);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);
vf4 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf4), vf4, vx4);
vf5 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf5), vf5, vx5);
vf6 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf6), vf6, vx6);
vf7 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf7), vf7, vx7);
vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
vf9 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf9), vf9, vx9);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
_mm256_storeu_ps(output + 16, vf2);
_mm256_storeu_ps(output + 24, vf3);
_mm256_storeu_ps(output + 32, vf4);
_mm256_storeu_ps(output + 40, vf5);
_mm256_storeu_ps(output + 48, vf6);
_mm256_storeu_ps(output + 56, vf7);
_mm256_storeu_ps(output + 64, vf8);
_mm256_storeu_ps(output + 72, vf9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vf = _mm256_div_ps(ve, vd);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 12,988 | 41.586885 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vsigmoid/gen/f32-vsigmoid-avx2-rr1-p5-nr1fma-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx0 = _mm256_loadu_ps(input);
const __m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
__m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
vt0 = _mm256_mul_ps(vt0, vs0);
vt1 = _mm256_mul_ps(vt1, vs1);
const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
const __m256 vd0 = _mm256_add_ps(ve0, vone);
const __m256 vd1 = _mm256_add_ps(ve1, vone);
__m256 vr0 = _mm256_rcp_ps(vd0);
__m256 vr1 = _mm256_rcp_ps(vd1);
vr0 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr0, vd0, vone), vr0, vr0);
vr1 = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr1, vd1, vone), vr1, vr1);
__m256 vf0 = _mm256_mul_ps(ve0, vr0);
__m256 vf1 = _mm256_mul_ps(ve1, vr1);
vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
_mm256_storeu_ps(output, vf0);
_mm256_storeu_ps(output + 8, vf1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
_mm256_storeu_ps(output, vf);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_or_ps(vx, vsign_mask);
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_fmadd_ps(vp, vt, vc1);
vt = _mm256_mul_ps(vt, vs);
const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);
const __m256 vd = _mm256_add_ps(ve, vone);
__m256 vr = _mm256_rcp_ps(vd);
vr = _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr);
__m256 vf = _mm256_mul_ps(ve, vr);
vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);
__m128 vf_lo = _mm256_castps256_ps128(vf);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vf_lo);
vf_lo = _mm256_extractf128_ps(vf, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vf_lo);
vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vf_lo);
}
}
}
| 6,220 | 33.561111 | 120 |
c
|
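Editor's note on the nr1fma variant above: instead of dividing e by d = e + 1, it takes the ~12-bit reciprocal estimate from _mm256_rcp_ps and refines it with one Newton-Raphson step, r <- r + r * (1 - r * d), which is exactly what _mm256_fmadd_ps(_mm256_fnmadd_ps(vr, vd, vone), vr, vr) computes and roughly doubles the number of correct bits. The scalar sketch below is illustrative only; the initial estimate r0 is a hand-picked stand-in for the hardware rcp result.

#include <stdio.h>

// One Newton-Raphson refinement of a reciprocal estimate r0 ~= 1/d:
//   r1 = r0 + r0 * (1 - r0 * d)
static float refine_reciprocal(float d, float r0)
{
  return r0 + r0 * (1.0f - r0 * d);
}

int main(void)
{
  const float d = 1.37f;
  const float r0 = 0.73f;                    // stand-in for _mm256_rcp_ps(d)
  const float r1 = refine_reciprocal(d, r0);
  printf("d=%f  r0=%f  r1=%f  1/d=%f\n", d, r0, r1, 1.0f / d);
  return 0;
}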