repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x144.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax pre-pass kernel: writes output[i] = exp(input[i] - *max) for each of
// batch/sizeof(float) elements and stores the sum of all outputs in *sum.
// exp() uses the rr1-p5 scheme: n = round(x * log2e); t = x - n*ln2 (the
// reduced argument); p = degree-5 polynomial in t; result = scalef(p, n),
// i.e. p * 2^n.  Main loop is unrolled to 144 floats (9 ZMM vectors) per
// iteration with a single vector accumulator (the "x144" variant).
//
// batch  - number of input BYTES; must be non-zero and a multiple of 4
// max    - pointer to the precomputed maximum of the input (for numerical
//          stability of the softmax)
// params - broadcastable constants: log2e, -ln2, and coefficients c0..c5
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x144(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
// Broadcast the maximum and all range-reduction/polynomial constants once.
const __m512 vi_max = _mm512_set1_ps(*max);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
// Running vector sum of all computed exponentials.
__m512 vacc0 = _mm512_setzero_ps();
// Main loop: 144 elements (9 x 16 lanes) per iteration.
for (; batch >= 144 * sizeof(float); batch -= 144 * sizeof(float)) {
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
input += 144;
// x = i - max, so x <= 0 and exp(x) <= 1 (no overflow).
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
// n = round-to-nearest(x * log2e); kept in float form for scalef below.
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
// t = x - n*ln2 (single fused step), the reduced argument for the polynomial.
const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);
// Degree-5 polynomial p(t) ~= exp(t), evaluated by Horner's rule:
// p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0.
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
// f = p * 2^n via VSCALEFPS; handles the exponent without integer bit tricks.
const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
output += 144;
// Fold all 9 result vectors into the single accumulator.
vacc0 = _mm512_add_ps(vacc0, vf0);
vacc0 = _mm512_add_ps(vacc0, vf1);
vacc0 = _mm512_add_ps(vacc0, vf2);
vacc0 = _mm512_add_ps(vacc0, vf3);
vacc0 = _mm512_add_ps(vacc0, vf4);
vacc0 = _mm512_add_ps(vacc0, vf5);
vacc0 = _mm512_add_ps(vacc0, vf6);
vacc0 = _mm512_add_ps(vacc0, vf7);
vacc0 = _mm512_add_ps(vacc0, vf8);
}
__m512 vacc = vacc0;
// Tail loop: one full 16-lane vector at a time.
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_storeu_ps(output, vf);
output += 16;
vacc = _mm512_add_ps(vacc, vf);
}
// Final remainder of 1..15 elements, handled with a lane mask so loads,
// stores, and the sum touch only valid elements.
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_mask_storeu_ps(output, vmask, vf);
vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
}
// Horizontal reduction of the 16 partial sums into the scalar result.
*sum = _mm512_reduce_add_ps(vacc);
}
| 8,417 | 37.438356 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x160-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax pre-pass kernel: writes output[i] = exp(input[i] - *max) and stores
// the sum of all outputs in *sum.  Same rr1-p5 scheme as the sibling kernels
// (n = round(x*log2e), t = x - n*ln2, degree-5 polynomial, scalef by n), but
// the main loop is unrolled to 160 floats (10 ZMM vectors) per iteration with
// TWO independent accumulators to shorten the add dependency chain ("acc2").
//
// batch  - number of input BYTES; must be non-zero and a multiple of 4
// max    - pointer to the precomputed maximum of the input
// params - broadcastable constants: log2e, -ln2, and coefficients c0..c5
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x160_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
// Broadcast the maximum and all range-reduction/polynomial constants once.
const __m512 vi_max = _mm512_set1_ps(*max);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
// Two independent sum accumulators (merged after the main loop).
__m512 vacc0 = _mm512_setzero_ps();
__m512 vacc1 = _mm512_setzero_ps();
// Main loop: 160 elements (10 x 16 lanes) per iteration.
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
const __m512 vi9 = _mm512_loadu_ps(input + 144);
input += 160;
// x = i - max, so x <= 0 and exp(x) <= 1 (no overflow).
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
// n = round-to-nearest(x * log2e); kept in float form for scalef below.
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
// t = x - n*ln2 (single fused step), the reduced argument for the polynomial.
const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2, vx9);
// Degree-5 polynomial p(t) ~= exp(t), evaluated by Horner's rule:
// p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0.
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
// f = p * 2^n via VSCALEFPS; handles the exponent without integer bit tricks.
const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
output += 160;
// Alternate the 10 result vectors between the two accumulators so the
// dependent adds form two chains of 5 instead of one chain of 10.
vacc0 = _mm512_add_ps(vacc0, vf0);
vacc1 = _mm512_add_ps(vacc1, vf1);
vacc0 = _mm512_add_ps(vacc0, vf2);
vacc1 = _mm512_add_ps(vacc1, vf3);
vacc0 = _mm512_add_ps(vacc0, vf4);
vacc1 = _mm512_add_ps(vacc1, vf5);
vacc0 = _mm512_add_ps(vacc0, vf6);
vacc1 = _mm512_add_ps(vacc1, vf7);
vacc0 = _mm512_add_ps(vacc0, vf8);
vacc1 = _mm512_add_ps(vacc1, vf9);
}
// Merge the two accumulators before the scalar-width tails.
vacc0 = _mm512_add_ps(vacc0, vacc1);
__m512 vacc = vacc0;
// Tail loop: one full 16-lane vector at a time.
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_storeu_ps(output, vf);
output += 16;
vacc = _mm512_add_ps(vacc, vf);
}
// Final remainder of 1..15 elements, handled with a lane mask so loads,
// stores, and the sum touch only valid elements.
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_mask_storeu_ps(output, vmask, vf);
vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
}
// Horizontal reduction of the 16 partial sums into the scalar result.
*sum = _mm512_reduce_add_ps(vacc);
}
| 9,089 | 38.012876 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x160-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax pre-pass kernel: writes output[i] = exp(input[i] - *max) and stores
// the sum of all outputs in *sum.  Same rr1-p5 scheme as the sibling kernels
// (n = round(x*log2e), t = x - n*ln2, degree-5 polynomial, scalef by n), but
// the main loop is unrolled to 160 floats (10 ZMM vectors) per iteration with
// FIVE independent accumulators to break the add dependency chain ("acc5").
//
// batch  - number of input BYTES; must be non-zero and a multiple of 4
// max    - pointer to the precomputed maximum of the input
// params - broadcastable constants: log2e, -ln2, and coefficients c0..c5
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x160_acc5(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
// Broadcast the maximum and all range-reduction/polynomial constants once.
const __m512 vi_max = _mm512_set1_ps(*max);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
// Five independent sum accumulators (merged after the main loop).
__m512 vacc0 = _mm512_setzero_ps();
__m512 vacc1 = _mm512_setzero_ps();
__m512 vacc2 = _mm512_setzero_ps();
__m512 vacc3 = _mm512_setzero_ps();
__m512 vacc4 = _mm512_setzero_ps();
// Main loop: 160 elements (10 x 16 lanes) per iteration.
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
const __m512 vi9 = _mm512_loadu_ps(input + 144);
input += 160;
// x = i - max, so x <= 0 and exp(x) <= 1 (no overflow).
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
// n = round-to-nearest(x * log2e); kept in float form for scalef below.
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
// t = x - n*ln2 (single fused step), the reduced argument for the polynomial.
const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2, vx9);
// Degree-5 polynomial p(t) ~= exp(t), evaluated by Horner's rule:
// p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)*t + c0.
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
// f = p * 2^n via VSCALEFPS; handles the exponent without integer bit tricks.
const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
output += 160;
// Round-robin the 10 result vectors over the 5 accumulators, so each
// accumulator sees only 2 dependent adds per iteration.
vacc0 = _mm512_add_ps(vacc0, vf0);
vacc1 = _mm512_add_ps(vacc1, vf1);
vacc2 = _mm512_add_ps(vacc2, vf2);
vacc3 = _mm512_add_ps(vacc3, vf3);
vacc4 = _mm512_add_ps(vacc4, vf4);
vacc0 = _mm512_add_ps(vacc0, vf5);
vacc1 = _mm512_add_ps(vacc1, vf6);
vacc2 = _mm512_add_ps(vacc2, vf7);
vacc3 = _mm512_add_ps(vacc3, vf8);
vacc4 = _mm512_add_ps(vacc4, vf9);
}
// Tree-reduce the five accumulators before the scalar-width tails.
vacc0 = _mm512_add_ps(vacc0, vacc1);
vacc2 = _mm512_add_ps(vacc2, vacc3);
vacc0 = _mm512_add_ps(vacc0, vacc2);
vacc0 = _mm512_add_ps(vacc0, vacc4);
__m512 vacc = vacc0;
// Tail loop: one full 16-lane vector at a time.
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_storeu_ps(output, vf);
output += 16;
vacc = _mm512_add_ps(vacc, vf);
}
// Final remainder of 1..15 elements, handled with a lane mask so loads,
// stores, and the sum touch only valid elements.
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_mask_storeu_ps(output, vmask, vf);
vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
}
// Horizontal reduction of the 16 partial sums into the scalar result.
*sum = _mm512_reduce_add_ps(vacc);
}
| 9,320 | 38 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x160.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x160(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m512 vi_max = _mm512_set1_ps(*max);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
__m512 vacc0 = _mm512_setzero_ps();
for (; batch >= 160 * sizeof(float); batch -= 160 * sizeof(float)) {
const __m512 vi0 = _mm512_loadu_ps(input);
const __m512 vi1 = _mm512_loadu_ps(input + 16);
const __m512 vi2 = _mm512_loadu_ps(input + 32);
const __m512 vi3 = _mm512_loadu_ps(input + 48);
const __m512 vi4 = _mm512_loadu_ps(input + 64);
const __m512 vi5 = _mm512_loadu_ps(input + 80);
const __m512 vi6 = _mm512_loadu_ps(input + 96);
const __m512 vi7 = _mm512_loadu_ps(input + 112);
const __m512 vi8 = _mm512_loadu_ps(input + 128);
const __m512 vi9 = _mm512_loadu_ps(input + 144);
input += 160;
const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);
const __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2, vx9);
__m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
__m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
__m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
__m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
__m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
__m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
__m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
__m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
__m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
__m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
_mm512_storeu_ps(output, vf0);
_mm512_storeu_ps(output + 16, vf1);
_mm512_storeu_ps(output + 32, vf2);
_mm512_storeu_ps(output + 48, vf3);
_mm512_storeu_ps(output + 64, vf4);
_mm512_storeu_ps(output + 80, vf5);
_mm512_storeu_ps(output + 96, vf6);
_mm512_storeu_ps(output + 112, vf7);
_mm512_storeu_ps(output + 128, vf8);
_mm512_storeu_ps(output + 144, vf9);
output += 160;
vacc0 = _mm512_add_ps(vacc0, vf0);
vacc0 = _mm512_add_ps(vacc0, vf1);
vacc0 = _mm512_add_ps(vacc0, vf2);
vacc0 = _mm512_add_ps(vacc0, vf3);
vacc0 = _mm512_add_ps(vacc0, vf4);
vacc0 = _mm512_add_ps(vacc0, vf5);
vacc0 = _mm512_add_ps(vacc0, vf6);
vacc0 = _mm512_add_ps(vacc0, vf7);
vacc0 = _mm512_add_ps(vacc0, vf8);
vacc0 = _mm512_add_ps(vacc0, vf9);
}
__m512 vacc = vacc0;
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vi = _mm512_loadu_ps(input);
input += 16;
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_storeu_ps(output, vf);
output += 16;
vacc = _mm512_add_ps(vacc, vf);
}
if (batch != 0) {
// Prepare mask for valid 32-bit batch (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vx = _mm512_sub_ps(vi, vi_max);
const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
__m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_fmadd_ps(vp, vt, vc1);
vp = _mm512_fmadd_ps(vp, vt, vc0);
const __m512 vf = _mm512_scalef_ps(vp, vn);
_mm512_mask_storeu_ps(output, vmask, vf);
vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
}
*sum = _mm512_reduce_add_ps(vacc);
}
| 9,007 | 37.995671 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x192-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for `batch` bytes of float input
// and writes the scalar sum of all results to *sum.
// AVX512F implementation: single-constant range reduction (RR1), a degree-5
// polynomial approximation (P5) evaluated in Horner form, and
// _mm512_scalef_ps (VSCALEFPS) to apply the 2^n scale in one instruction.
// The main loop consumes 192 elements (12 zmm vectors) per iteration and
// keeps 2 independent partial-sum accumulators ("acc2") to shorten the
// floating-point dependency chain of the reduction.
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the caller-provided maximum and the range-reduction/polynomial
  // constants into zmm registers.
  const __m512 vi_max = _mm512_set1_ps(*max);
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
  // Two independent partial sums of the exp() results.
  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  // Main loop: 192 elements (12 vectors of 16 floats) per iteration.
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    const __m512 vi10 = _mm512_loadu_ps(input + 160);
    const __m512 vi11 = _mm512_loadu_ps(input + 176);
    input += 192;
    // x := i - max: shift the exp() argument by the caller-provided maximum.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
    const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
    const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);
    // n := round(x * log2(e)); roundscale imm8=0 rounds to nearest-even.
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
    // t := x - n*ln2 (fused as n * (-ln2) + x): the reduced argument.
    const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
    const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
    const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
    const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
    const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
    const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
    const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
    const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
    const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);
    const __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2, vx9);
    const __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2, vx10);
    const __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2, vx11);
    // Horner evaluation of p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
    // f := p * 2^n, applied in a single VSCALEFPS instruction.
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
    const __m512 vf10 = _mm512_scalef_ps(vp10, vn10);
    const __m512 vf11 = _mm512_scalef_ps(vp11, vn11);
    // Store all 192 results.
    _mm512_storeu_ps(output, vf0);
    _mm512_storeu_ps(output + 16, vf1);
    _mm512_storeu_ps(output + 32, vf2);
    _mm512_storeu_ps(output + 48, vf3);
    _mm512_storeu_ps(output + 64, vf4);
    _mm512_storeu_ps(output + 80, vf5);
    _mm512_storeu_ps(output + 96, vf6);
    _mm512_storeu_ps(output + 112, vf7);
    _mm512_storeu_ps(output + 128, vf8);
    _mm512_storeu_ps(output + 144, vf9);
    _mm512_storeu_ps(output + 160, vf10);
    _mm512_storeu_ps(output + 176, vf11);
    output += 192;
    // Accumulate, alternating between the two partial sums.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc0 = _mm512_add_ps(vacc0, vf2);
    vacc1 = _mm512_add_ps(vacc1, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc1 = _mm512_add_ps(vacc1, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc1 = _mm512_add_ps(vacc1, vf7);
    vacc0 = _mm512_add_ps(vacc0, vf8);
    vacc1 = _mm512_add_ps(vacc1, vf9);
    vacc0 = _mm512_add_ps(vacc0, vf10);
    vacc1 = _mm512_add_ps(vacc1, vf11);
  }
  // Fold the partial sums into a single vector accumulator.
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  __m512 vacc = vacc0;
  // Process any remaining full vectors of 16 elements.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    _mm512_storeu_ps(output, vf);
    output += 16;
    vacc = _mm512_add_ps(vacc, vf);
  }
  // Handle the final 1..15 elements under a per-lane mask.
  if (batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    _mm512_mask_storeu_ps(output, vmask, vf);
    // Masked add: only the valid lanes contribute to the sum.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the vector accumulator to the scalar *sum.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 10,323 | 39.171206 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x192-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for `batch` bytes of float input
// and writes the scalar sum of all results to *sum.
// AVX512F implementation: single-constant range reduction (RR1), a degree-5
// polynomial approximation (P5) evaluated in Horner form, and
// _mm512_scalef_ps (VSCALEFPS) to apply the 2^n scale in one instruction.
// The main loop consumes 192 elements (12 zmm vectors) per iteration and
// keeps 3 independent partial-sum accumulators ("acc3") to shorten the
// floating-point dependency chain of the reduction.
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the caller-provided maximum and the range-reduction/polynomial
  // constants into zmm registers.
  const __m512 vi_max = _mm512_set1_ps(*max);
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
  // Three independent partial sums of the exp() results.
  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  __m512 vacc2 = _mm512_setzero_ps();
  // Main loop: 192 elements (12 vectors of 16 floats) per iteration.
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    const __m512 vi10 = _mm512_loadu_ps(input + 160);
    const __m512 vi11 = _mm512_loadu_ps(input + 176);
    input += 192;
    // x := i - max: shift the exp() argument by the caller-provided maximum.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
    const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
    const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);
    // n := round(x * log2(e)); roundscale imm8=0 rounds to nearest-even.
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
    // t := x - n*ln2 (fused as n * (-ln2) + x): the reduced argument.
    const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
    const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
    const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
    const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
    const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
    const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
    const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
    const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
    const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);
    const __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2, vx9);
    const __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2, vx10);
    const __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2, vx11);
    // Horner evaluation of p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
    // f := p * 2^n, applied in a single VSCALEFPS instruction.
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
    const __m512 vf10 = _mm512_scalef_ps(vp10, vn10);
    const __m512 vf11 = _mm512_scalef_ps(vp11, vn11);
    // Store all 192 results.
    _mm512_storeu_ps(output, vf0);
    _mm512_storeu_ps(output + 16, vf1);
    _mm512_storeu_ps(output + 32, vf2);
    _mm512_storeu_ps(output + 48, vf3);
    _mm512_storeu_ps(output + 64, vf4);
    _mm512_storeu_ps(output + 80, vf5);
    _mm512_storeu_ps(output + 96, vf6);
    _mm512_storeu_ps(output + 112, vf7);
    _mm512_storeu_ps(output + 128, vf8);
    _mm512_storeu_ps(output + 144, vf9);
    _mm512_storeu_ps(output + 160, vf10);
    _mm512_storeu_ps(output + 176, vf11);
    output += 192;
    // Accumulate round-robin across the three partial sums.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc2 = _mm512_add_ps(vacc2, vf2);
    vacc0 = _mm512_add_ps(vacc0, vf3);
    vacc1 = _mm512_add_ps(vacc1, vf4);
    vacc2 = _mm512_add_ps(vacc2, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc1 = _mm512_add_ps(vacc1, vf7);
    vacc2 = _mm512_add_ps(vacc2, vf8);
    vacc0 = _mm512_add_ps(vacc0, vf9);
    vacc1 = _mm512_add_ps(vacc1, vf10);
    vacc2 = _mm512_add_ps(vacc2, vf11);
  }
  // Fold the three partial sums into a single vector accumulator.
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  vacc0 = _mm512_add_ps(vacc0, vacc2);
  __m512 vacc = vacc0;
  // Process any remaining full vectors of 16 elements.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    _mm512_storeu_ps(output, vf);
    output += 16;
    vacc = _mm512_add_ps(vacc, vf);
  }
  // Handle the final 1..15 elements under a per-lane mask.
  if (batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    _mm512_mask_storeu_ps(output, vmask, vf);
    // Masked add: only the valid lanes contribute to the sum.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the vector accumulator to the scalar *sum.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 10,400 | 39.158301 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x192-acc6.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for `batch` bytes of float input
// and writes the scalar sum of all results to *sum.
// AVX512F implementation: single-constant range reduction (RR1), a degree-5
// polynomial approximation (P5) evaluated in Horner form, and
// _mm512_scalef_ps (VSCALEFPS) to apply the 2^n scale in one instruction.
// The main loop consumes 192 elements (12 zmm vectors) per iteration and
// keeps 6 independent partial-sum accumulators ("acc6") to shorten the
// floating-point dependency chain of the reduction.
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192_acc6(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the caller-provided maximum and the range-reduction/polynomial
  // constants into zmm registers.
  const __m512 vi_max = _mm512_set1_ps(*max);
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);
  // Six independent partial sums of the exp() results.
  __m512 vacc0 = _mm512_setzero_ps();
  __m512 vacc1 = _mm512_setzero_ps();
  __m512 vacc2 = _mm512_setzero_ps();
  __m512 vacc3 = _mm512_setzero_ps();
  __m512 vacc4 = _mm512_setzero_ps();
  __m512 vacc5 = _mm512_setzero_ps();
  // Main loop: 192 elements (12 vectors of 16 floats) per iteration.
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    const __m512 vi10 = _mm512_loadu_ps(input + 160);
    const __m512 vi11 = _mm512_loadu_ps(input + 176);
    input += 192;
    // x := i - max: shift the exp() argument by the caller-provided maximum.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
    const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
    const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);
    // n := round(x * log2(e)); roundscale imm8=0 rounds to nearest-even.
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);
    // t := x - n*ln2 (fused as n * (-ln2) + x): the reduced argument.
    const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
    const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
    const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
    const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
    const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
    const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
    const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
    const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
    const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);
    const __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2, vx9);
    const __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2, vx10);
    const __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2, vx11);
    // Horner evaluation of p = c0 + t*(c1 + t*(c2 + t*(c3 + t*(c4 + t*c5)))).
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);
    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);
    // f := p * 2^n, applied in a single VSCALEFPS instruction.
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
    const __m512 vf10 = _mm512_scalef_ps(vp10, vn10);
    const __m512 vf11 = _mm512_scalef_ps(vp11, vn11);
    // Store all 192 results.
    _mm512_storeu_ps(output, vf0);
    _mm512_storeu_ps(output + 16, vf1);
    _mm512_storeu_ps(output + 32, vf2);
    _mm512_storeu_ps(output + 48, vf3);
    _mm512_storeu_ps(output + 64, vf4);
    _mm512_storeu_ps(output + 80, vf5);
    _mm512_storeu_ps(output + 96, vf6);
    _mm512_storeu_ps(output + 112, vf7);
    _mm512_storeu_ps(output + 128, vf8);
    _mm512_storeu_ps(output + 144, vf9);
    _mm512_storeu_ps(output + 160, vf10);
    _mm512_storeu_ps(output + 176, vf11);
    output += 192;
    // Accumulate round-robin across the six partial sums.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc1 = _mm512_add_ps(vacc1, vf1);
    vacc2 = _mm512_add_ps(vacc2, vf2);
    vacc3 = _mm512_add_ps(vacc3, vf3);
    vacc4 = _mm512_add_ps(vacc4, vf4);
    vacc5 = _mm512_add_ps(vacc5, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc1 = _mm512_add_ps(vacc1, vf7);
    vacc2 = _mm512_add_ps(vacc2, vf8);
    vacc3 = _mm512_add_ps(vacc3, vf9);
    vacc4 = _mm512_add_ps(vacc4, vf10);
    vacc5 = _mm512_add_ps(vacc5, vf11);
  }
  // Fold the six partial sums pairwise into a single vector accumulator.
  vacc0 = _mm512_add_ps(vacc0, vacc1);
  vacc2 = _mm512_add_ps(vacc2, vacc3);
  vacc4 = _mm512_add_ps(vacc4, vacc5);
  vacc0 = _mm512_add_ps(vacc0, vacc2);
  vacc0 = _mm512_add_ps(vacc0, vacc4);
  __m512 vacc = vacc0;
  // Process any remaining full vectors of 16 elements.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    _mm512_storeu_ps(output, vf);
    output += 16;
    vacc = _mm512_add_ps(vacc, vf);
  }
  // Handle the final 1..15 elements under a per-lane mask.
  if (batch != 0) {
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);
    const __m512 vx = _mm512_sub_ps(vi, vi_max);
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);
    const __m512 vf = _mm512_scalef_ps(vp, vn);
    _mm512_mask_storeu_ps(output, vmask, vf);
    // Masked add: only the valid lanes contribute to the sum.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the vector accumulator to the scalar *sum.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 10,631 | 39.120755 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-avx512f-rr1-p5-scalef-x192.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/avx512f-rr1-p5-scalef.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max) for `batch` floats, stores each f[i]
// to `output`, and writes the total sum of all f[i] to `sum` (the building
// block of a numerically-stable softmax).
//
// AVX512F variant: single-step range reduction (rr1), degree-5 polynomial
// approximation of exp on the reduced range, and _mm512_scalef_ps for the
// 2^n reconstruction. Main loop is unrolled to 192 elements (12 ZMM
// vectors of 16 floats) with a single vector accumulator.
void xnn_f32_raddstoreexpminusmax_ukernel__avx512f_rr1_p5_scalef_x192(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count of whole floats
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the pre-computed row maximum and the polynomial coefficients.
  const __m512 vi_max = _mm512_set1_ps(*max);
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vc0 = _mm512_set1_ps(params->avx512_rr1_p5.c0);

  __m512 vacc0 = _mm512_setzero_ps();
  // Main loop: 192 elements (12 vectors) per iteration.
  for (; batch >= 192 * sizeof(float); batch -= 192 * sizeof(float)) {
    const __m512 vi0 = _mm512_loadu_ps(input);
    const __m512 vi1 = _mm512_loadu_ps(input + 16);
    const __m512 vi2 = _mm512_loadu_ps(input + 32);
    const __m512 vi3 = _mm512_loadu_ps(input + 48);
    const __m512 vi4 = _mm512_loadu_ps(input + 64);
    const __m512 vi5 = _mm512_loadu_ps(input + 80);
    const __m512 vi6 = _mm512_loadu_ps(input + 96);
    const __m512 vi7 = _mm512_loadu_ps(input + 112);
    const __m512 vi8 = _mm512_loadu_ps(input + 128);
    const __m512 vi9 = _mm512_loadu_ps(input + 144);
    const __m512 vi10 = _mm512_loadu_ps(input + 160);
    const __m512 vi11 = _mm512_loadu_ps(input + 176);
    input += 192;

    // x = i - max, so x <= 0 and exp(x) never overflows.
    const __m512 vx0 = _mm512_sub_ps(vi0, vi_max);
    const __m512 vx1 = _mm512_sub_ps(vi1, vi_max);
    const __m512 vx2 = _mm512_sub_ps(vi2, vi_max);
    const __m512 vx3 = _mm512_sub_ps(vi3, vi_max);
    const __m512 vx4 = _mm512_sub_ps(vi4, vi_max);
    const __m512 vx5 = _mm512_sub_ps(vi5, vi_max);
    const __m512 vx6 = _mm512_sub_ps(vi6, vi_max);
    const __m512 vx7 = _mm512_sub_ps(vi7, vi_max);
    const __m512 vx8 = _mm512_sub_ps(vi8, vi_max);
    const __m512 vx9 = _mm512_sub_ps(vi9, vi_max);
    const __m512 vx10 = _mm512_sub_ps(vi10, vi_max);
    const __m512 vx11 = _mm512_sub_ps(vi11, vi_max);

    // n = round(x / ln2): the power-of-two exponent of the result
    // (roundscale with imm 0 = round to nearest integer).
    const __m512 vn0 = _mm512_roundscale_ps(_mm512_mul_ps(vx0, vlog2e), 0);
    const __m512 vn1 = _mm512_roundscale_ps(_mm512_mul_ps(vx1, vlog2e), 0);
    const __m512 vn2 = _mm512_roundscale_ps(_mm512_mul_ps(vx2, vlog2e), 0);
    const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);
    const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);
    const __m512 vn5 = _mm512_roundscale_ps(_mm512_mul_ps(vx5, vlog2e), 0);
    const __m512 vn6 = _mm512_roundscale_ps(_mm512_mul_ps(vx6, vlog2e), 0);
    const __m512 vn7 = _mm512_roundscale_ps(_mm512_mul_ps(vx7, vlog2e), 0);
    const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);
    const __m512 vn9 = _mm512_roundscale_ps(_mm512_mul_ps(vx9, vlog2e), 0);
    const __m512 vn10 = _mm512_roundscale_ps(_mm512_mul_ps(vx10, vlog2e), 0);
    const __m512 vn11 = _mm512_roundscale_ps(_mm512_mul_ps(vx11, vlog2e), 0);

    // t = x - n*ln2: reduced argument in [-ln2/2, ln2/2].
    const __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vx0);
    const __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vx1);
    const __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vx2);
    const __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vx3);
    const __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vx4);
    const __m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vx5);
    const __m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vx6);
    const __m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vx7);
    const __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2, vx8);
    const __m512 vt9 = _mm512_fmadd_ps(vn9, vminus_ln2, vx9);
    const __m512 vt10 = _mm512_fmadd_ps(vn10, vminus_ln2, vx10);
    const __m512 vt11 = _mm512_fmadd_ps(vn11, vminus_ln2, vx11);

    // Degree-5 polynomial p(t) ~= exp(t), evaluated by Horner's scheme.
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);
    __m512 vp5 = _mm512_fmadd_ps(vc5, vt5, vc4);
    __m512 vp6 = _mm512_fmadd_ps(vc5, vt6, vc4);
    __m512 vp7 = _mm512_fmadd_ps(vc5, vt7, vc4);
    __m512 vp8 = _mm512_fmadd_ps(vc5, vt8, vc4);
    __m512 vp9 = _mm512_fmadd_ps(vc5, vt9, vc4);
    __m512 vp10 = _mm512_fmadd_ps(vc5, vt10, vc4);
    __m512 vp11 = _mm512_fmadd_ps(vc5, vt11, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc3);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc3);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc3);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc2);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc2);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc2);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc1);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc1);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc1);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc1);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc0);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc0);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc0);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc0);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc0);
    vp5 = _mm512_fmadd_ps(vp5, vt5, vc0);
    vp6 = _mm512_fmadd_ps(vp6, vt6, vc0);
    vp7 = _mm512_fmadd_ps(vp7, vt7, vc0);
    vp8 = _mm512_fmadd_ps(vp8, vt8, vc0);
    vp9 = _mm512_fmadd_ps(vp9, vt9, vc0);
    vp10 = _mm512_fmadd_ps(vp10, vt10, vc0);
    vp11 = _mm512_fmadd_ps(vp11, vt11, vc0);

    // Reconstruct: f = p(t) * 2^n (scalef handles out-of-range n gracefully).
    const __m512 vf0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 vf1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 vf2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 vf3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 vf4 = _mm512_scalef_ps(vp4, vn4);
    const __m512 vf5 = _mm512_scalef_ps(vp5, vn5);
    const __m512 vf6 = _mm512_scalef_ps(vp6, vn6);
    const __m512 vf7 = _mm512_scalef_ps(vp7, vn7);
    const __m512 vf8 = _mm512_scalef_ps(vp8, vn8);
    const __m512 vf9 = _mm512_scalef_ps(vp9, vn9);
    const __m512 vf10 = _mm512_scalef_ps(vp10, vn10);
    const __m512 vf11 = _mm512_scalef_ps(vp11, vn11);

    _mm512_storeu_ps(output, vf0);
    _mm512_storeu_ps(output + 16, vf1);
    _mm512_storeu_ps(output + 32, vf2);
    _mm512_storeu_ps(output + 48, vf3);
    _mm512_storeu_ps(output + 64, vf4);
    _mm512_storeu_ps(output + 80, vf5);
    _mm512_storeu_ps(output + 96, vf6);
    _mm512_storeu_ps(output + 112, vf7);
    _mm512_storeu_ps(output + 128, vf8);
    _mm512_storeu_ps(output + 144, vf9);
    _mm512_storeu_ps(output + 160, vf10);
    _mm512_storeu_ps(output + 176, vf11);
    output += 192;

    // Accumulate all twelve result vectors into the single accumulator.
    vacc0 = _mm512_add_ps(vacc0, vf0);
    vacc0 = _mm512_add_ps(vacc0, vf1);
    vacc0 = _mm512_add_ps(vacc0, vf2);
    vacc0 = _mm512_add_ps(vacc0, vf3);
    vacc0 = _mm512_add_ps(vacc0, vf4);
    vacc0 = _mm512_add_ps(vacc0, vf5);
    vacc0 = _mm512_add_ps(vacc0, vf6);
    vacc0 = _mm512_add_ps(vacc0, vf7);
    vacc0 = _mm512_add_ps(vacc0, vf8);
    vacc0 = _mm512_add_ps(vacc0, vf9);
    vacc0 = _mm512_add_ps(vacc0, vf10);
    vacc0 = _mm512_add_ps(vacc0, vf11);
  }

  __m512 vacc = vacc0;
  // Remainder loop: one vector (16 floats) at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m512 vi = _mm512_loadu_ps(input);
    input += 16;

    const __m512 vx = _mm512_sub_ps(vi, vi_max);

    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);

    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    const __m512 vf = _mm512_scalef_ps(vp, vn);

    _mm512_storeu_ps(output, vf);
    output += 16;

    vacc = _mm512_add_ps(vacc, vf);
  }
  if (batch != 0) {
    // Final partial vector: process the last 1..15 floats under a lane mask.
    // Prepare mask for valid 32-bit batch (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    const __m512 vi = _mm512_maskz_loadu_ps(vmask, input);

    const __m512 vx = _mm512_sub_ps(vi, vi_max);

    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);

    const __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vx);

    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vc0);

    const __m512 vf = _mm512_scalef_ps(vp, vn);

    _mm512_mask_storeu_ps(output, vmask, vf);

    // Masked add: inactive lanes keep their previous accumulator value.
    vacc = _mm512_mask_add_ps(vacc, vmask, vacc, vf);
  }
  // Horizontal reduction of the 16 partial sums.
  *sum = _mm512_reduce_add_ps(vacc);
}
| 10,241 | 39.164706 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x12-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes f[i] = exp(input[i] - *max) for `batch` floats, stores each f[i]
// to `output`, and writes the total sum of all f[i] to `sum`.
//
// NEON variant: two-step range reduction (rr2, hi/lo split of ln2), a
// 64-entry exp2 lookup table combined with a degree-2 polynomial, main loop
// unrolled to 12 elements (3 vectors) with TWO partial accumulators (acc2)
// for better FP-add latency hiding and accuracy.
//
// Bug fix: the main loop previously added all three result vectors into
// vacc0, leaving vacc1 permanently zero and defeating the acc2 design.
// It now round-robins vf0123/vf89AB into vacc0 and vf4567 into vacc1,
// matching the sibling acc3 kernel's accumulation pattern.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x12_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count of whole floats
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the pre-computed row maximum and the approximation constants.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  // Main loop: 12 elements (3 vectors) per iteration.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;

    // x = i - max, so x <= 0 and exp(x) never overflows.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);

    // n = x * 64/ln2, extracted via the magic-bias trick: after the add the
    // low mantissa bits of vn hold round(x * 64/ln2) as an integer.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);

    // e = floor(n/64) scaled into float-exponent position (shift by 17
    // because the low 6 bits are the table index).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Table indices: low 6 bits of each lane, extracted pairwise as u64.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);

    // Gather 2^(idx/64) table entries, two lanes at a time.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);

    // s = 2^floor(n/64) * table[n mod 64]: add e into the table entry's
    // exponent field via integer arithmetic.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));

    // Recover n as a float and compute the reduced argument
    // t = x - n*ln2/64 using the hi/lo split of ln2 for extra precision.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);

    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);

    // p = t + c2*t^2 (degree-2 minimax polynomial of exp(t) - 1).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);

    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);

    // f = s * (1 + p).
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);

    // Flush results of very negative inputs (below the denormal cutoff) to 0.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;

    // Round-robin over both partial accumulators (fix: vf4567 previously
    // went into vacc0 as well, leaving vacc1 unused).
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
  }
  // Merge the partial accumulators.
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
  // Remainder loop: one vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);

    float32x4_t vf = vmlaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    // Final 1..3 elements: compute a full vector (kernel is marked
    // XNN_OOB_READS, so reading past the end of `input` is permitted),
    // but store and accumulate only the valid lanes.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);

    float32x4_t vf = vmlaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift the remaining lane into the upper half so the final pairwise
      // add below counts it exactly once.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 10,171 | 42.470085 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes f[i] = exp(input[i] - *max) for `batch` floats, stores each f[i]
// to `output`, and writes the total sum of all f[i] to `sum`.
//
// NEON variant: two-step range reduction (rr2, hi/lo split of ln2), a
// 64-entry exp2 lookup table combined with a degree-2 polynomial, main loop
// unrolled to 12 elements (3 vectors) with THREE partial accumulators
// (acc3), one per unrolled vector.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x12_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch is a byte count of whole floats
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the pre-computed row maximum and the approximation constants.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  float32x4_t vacc2 = vmovq_n_f32(0.0f);
  // Main loop: 12 elements (3 vectors) per iteration.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;

    // x = i - max, so x <= 0 and exp(x) never overflows.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);

    // n = x * 64/ln2, extracted via the magic-bias trick: after the add the
    // low mantissa bits of vn hold round(x * 64/ln2) as an integer.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);

    // e = floor(n/64) scaled into float-exponent position (shift by 17
    // because the low 6 bits are the table index).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Table indices: low 6 bits of each lane, extracted pairwise as u64.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);

    // Gather 2^(idx/64) table entries, two lanes at a time.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);

    // s = 2^floor(n/64) * table[n mod 64]: add e into the table entry's
    // exponent field via integer arithmetic.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));

    // Recover n as a float and compute the reduced argument
    // t = x - n*ln2/64 using the hi/lo split of ln2 for extra precision.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);

    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);

    // p = t + c2*t^2 (degree-2 minimax polynomial of exp(t) - 1).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);

    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);

    // f = s * (1 + p).
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);

    // Flush results of very negative inputs (below the denormal cutoff) to 0.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;

    // One partial accumulator per unrolled vector.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc2 = vaddq_f32(vacc2, vf89AB);
  }
  // Merge the partial accumulators.
  vacc0 = vaddq_f32(vacc0, vacc1);
  vacc0 = vaddq_f32(vacc0, vacc2);

  float32x4_t vacc = vacc0;
  // Remainder loop: one vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);

    float32x4_t vf = vmlaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    // Final 1..3 elements: compute a full vector (kernel is marked
    // XNN_OOB_READS, so reading past the end of `input` is permitted),
    // but store and accumulate only the valid lanes.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));

    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);

    float32x4_t vf = vmlaq_f32(vs, vs, vp);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift the remaining lane into the upper half so the final pairwise
      // add below counts it exactly once.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 10,247 | 42.423729 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x12(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.magic_bias);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.minus_ln2_lo);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.c2);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);
float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
}
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlaq_f32(vt, vt, vp);
float32x4_t vf = vmlaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlaq_f32(vt, vt, vp);
float32x4_t vf = vmlaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 10,090 | 42.49569 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x16_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.magic_bias);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.minus_ln2_lo);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.c2);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);
vpCDEF = vmlaq_f32(vtCDEF, vtCDEF, vpCDEF);
float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
}
vacc0 = vaddq_f32(vacc0, vacc1);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlaq_f32(vt, vt, vp);
float32x4_t vf = vmlaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlaq_f32(vt, vt, vp);
float32x4_t vf = vmlaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 11,719 | 44.78125 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x16_acc4(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.magic_bias);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.minus_ln2_lo);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.c2);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);
vpCDEF = vmlaq_f32(vtCDEF, vtCDEF, vpCDEF);
float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlaq_f32(vt, vt, vp);
float32x4_t vf = vmlaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlaq_f32(vt, vt, vp);
float32x4_t vf = vmlaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 11,871 | 44.661538 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x16(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.magic_bias);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.minus_ln2_lo);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.c2);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_lut64_p2.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);
vpCDEF = vmlaq_f32(vtCDEF, vtCDEF, vpCDEF);
float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
}
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlaq_f32(vt, vt, vp);
float32x4_t vf = vmlaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vmlaq_f32(vt, vt, vp);
float32x4_t vf = vmlaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 11,638 | 44.822835 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x20-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) for `batch` bytes of floats and
// accumulates the sum of all outputs into *sum (the exponentiation pass of a
// numerically-stable softmax).
//
// NEON implementation: round-to-nearest via magic-bias trick, two-constant
// (hi/lo) Cody-Waite reduction of n*ln2/64, 64-entry 2^(k/64) lookup table,
// and a degree-2 polynomial.  The main loop processes 20 elements per
// iteration and splits the running sum across TWO accumulators ("acc2") so
// consecutive vector adds are independent and can dual-issue.
//
// Fix: the original loop added every partial result into vacc0, leaving
// vacc1 permanently zero and defeating the acc2 design; the accumulation now
// alternates between vacc0 and vacc1 (the post-loop merge was already there).
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the precomputed maximum and the algorithm constants.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

    // x = input - max  (guarantees x <= 0, so exp(x) never overflows).
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

    // n = round(x * 64/ln2), encoded in the low mantissa bits via magic bias.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vxGHIJ, vlog2e);

    // e = floor(n/64) moved into the float exponent field: clear the 6 index
    // bits and shift left by 17 (23 mantissa bits - 6 index bits).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veGHIJ = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnGHIJ), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Low 6 bits of n index the 2^(k/64) table; extract index pairs as u64 lanes.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);

    // Gather table entries: low 32 bits of each pair into lane 0, high into lane 1.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

    // s = table[idx] * 2^e, done as an integer add on the float exponent bits.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));

    // Undo the magic bias to recover n as a real-valued float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

    // t = x - n*ln2/64, computed with a hi/lo split for extra precision.
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vxGHIJ, vnGHIJ, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);

    // p = t + c2*t^2 (degree-2 approximation of expm1(t) on the tiny range of t).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vmlaq_f32(vtCDEF, vtCDEF, vpCDEF);
    vpGHIJ = vmlaq_f32(vtGHIJ, vtGHIJ, vpGHIJ);

    // f = s * (1 + p) = exp(x).
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
    float32x4_t vfGHIJ = vmlaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);

    // Flush outputs to zero where x underflows below the denormal cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;

    // Alternate partial sums between the two accumulators so consecutive
    // vector adds are independent (the reason this variant is "acc2").
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc1 = vaddq_f32(vacc1, vfCDEF);
    vacc0 = vaddq_f32(vacc0, vfGHIJ);
  }
  // Merge the partial sums into a single vector accumulator.
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
  // Remaining full vectors of 4 elements.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  // Tail of 1-3 elements: a full 4-lane load is safe under XNN_OOB_READS;
  // only the valid lanes are stored and accumulated.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the last value into lane 1 (lane 0 becomes zero) so a single
        // lane is added without disturbing the other partial sum lane.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  // Horizontal add of the 2-lane partial sum, then store lane 0.
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 13,267 | 46.726619 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x20-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) for `batch` bytes of floats and
// accumulates the sum of all outputs into *sum (the exponentiation pass of a
// numerically-stable softmax).  NEON variant: round-to-nearest via magic
// bias, two-constant (hi/lo) ln2 reduction, 64-entry 2^(k/64) lookup table,
// degree-2 polynomial; 20 elements per main-loop iteration with FIVE partial
// sum accumulators ("acc5") to break the floating-point add dependency chain.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20_acc5(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the precomputed maximum and the algorithm constants.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  // Five independent partial-sum accumulators.
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  float32x4_t vacc2 = vmovq_n_f32(0.0f);
  float32x4_t vacc3 = vmovq_n_f32(0.0f);
  float32x4_t vacc4 = vmovq_n_f32(0.0f);
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;
    // x = input - max  (guarantees x <= 0, so exp(x) never overflows).
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);
    // n = round(x * 64/ln2), encoded in the low mantissa bits via magic bias.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vxGHIJ, vlog2e);
    // e = floor(n/64) shifted into the float exponent field: clear the 6
    // table-index bits, then shift left by 17 (= 23 mantissa bits - 6).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veGHIJ = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnGHIJ), vmovq_n_s32(INT32_C(0x3F))), 17);
    // Low 6 bits of n index the table; extract index pairs as u64 lanes.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
    // Gather 2^(k/64) table entries: low half of each index pair into lane 0,
    // high half into lane 1, then recombine pairs into 4-lane vectors.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
    // s = table[idx] * 2^e, done as an integer add on the float exponent bits.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
    // Undo the magic bias to recover n as a real-valued float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
    // t = x - n*ln2/64, computed with a hi/lo split for extra precision.
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vxGHIJ, vnGHIJ, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
    // p = t + c2*t^2 (degree-2 polynomial on the reduced argument t).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vmlaq_f32(vtCDEF, vtCDEF, vpCDEF);
    vpGHIJ = vmlaq_f32(vtGHIJ, vtGHIJ, vpGHIJ);
    // f = s * (1 + p) = exp(x).
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
    float32x4_t vfGHIJ = vmlaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
    // Flush outputs to zero where x underflows below the denormal cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;
    // Each partial result goes into exactly one of the five accumulators, so
    // the total is unaffected by the permuted (0,4,3,2,1) assignment order.
    // NOTE(review): the canonical generator emits in-order (0,1,2,3,4)
    // assignment — confirm against the template if regenerating.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc4 = vaddq_f32(vacc4, vf4567);
    vacc3 = vaddq_f32(vacc3, vf89AB);
    vacc2 = vaddq_f32(vacc2, vfCDEF);
    vacc1 = vaddq_f32(vacc1, vfGHIJ);
  }
  // Pairwise reduction of the five accumulators into vacc0.
  vacc0 = vaddq_f32(vacc0, vacc1);
  vacc2 = vaddq_f32(vacc2, vacc3);
  vacc0 = vaddq_f32(vacc0, vacc2);
  vacc0 = vaddq_f32(vacc0, vacc4);

  float32x4_t vacc = vacc0;
  // Remaining full vectors of 4 elements.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  // Tail of 1-3 elements: a full 4-lane load is safe under XNN_OOB_READS;
  // only the valid lanes are stored and accumulated.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the last value into lane 1 (lane 0 becomes zero) so a single
        // lane is added without disturbing the other partial sum lane.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  // Horizontal add of the 2-lane partial sum, then store lane 0.
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 13,495 | 46.521127 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) over `batch` floats and writes the
// sum of all the exponentials to *sum.  Main loop handles 20 elements
// (5 NEON vectors) per iteration.
//
// exp(x) is approximated as 2^n * 2^(k/64) * (1 + p(r)): n and the 6-bit
// table index k are obtained by rounding x*log2(e) with the magic-bias trick,
// 2^(k/64) is read from the 64-entry table xnn_table_exp2_k_over_64, and p is
// a degree-2 polynomial in the reduced argument r.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x20(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the maximum and the approximation constants to all lanes.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  // -ln(2) split into high/low halves for the two-step ("rr2")
  // extended-precision argument reduction.
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    // Load 20 inputs and subtract the reference maximum.
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

    // n := round(x * log2(e) * 64) / 64, encoded in the mantissa of vn by
    // adding the magic bias.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vxGHIJ, vlog2e);

    // Exponent increment: clear the 6 table-index bits and shift the integer
    // part of n into the float exponent field (left by 17).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veGHIJ = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnGHIJ), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Low 6 bits of each lane index the 2^(k/64) table; reinterpret as u64
    // pairs so two 32-bit indices can be extracted per lane.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);

    // Gather 2^(k/64) table values, two lanes per 64-bit index (low 32 bits
    // fill both lanes first, then the high 32 bits overwrite lane 1).
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);

    // Scale factor s = 2^n * 2^(k/64): add the exponent bits to the table
    // value's bit pattern.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));

    // Recover the real-valued n by removing the magic bias.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

    // Reduced argument r = x - n*ln(2), computed in two steps (hi then lo).
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vxGHIJ, vnGHIJ, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);

    // Degree-2 polynomial p = r + c2*r^2; final value f = s + s*p = s*(1+p).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vmlaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vmlaq_f32(vtCDEF, vtCDEF, vpCDEF);
    vpGHIJ = vmlaq_f32(vtGHIJ, vtGHIJ, vpGHIJ);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vsCDEF, vpCDEF);
    float32x4_t vfGHIJ = vmlaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);

    // Flush lanes whose input was below the denormal cutoff to zero.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));

    // Store the exponentials and accumulate their sum.
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc0 = vaddq_f32(vacc0, vfCDEF);
    vacc0 = vaddq_f32(vacc0, vfGHIJ);
  }
  float32x4_t vacc = vacc0;
  // Tail loop: one vector (4 floats) at a time; same algorithm as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the vector accumulator.
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full 4-lane load for 1-3 remaining elements; the over-read is permitted
    // (function is annotated XNN_OOB_READS), only valid lanes are stored.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      // Store/accumulate two elements, then advance to the upper half.
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the single valid lane into lane 1 (zeroing lane 0) so the
        // final pairwise add below counts it exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 13,186 | 46.778986 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) for `batch` floats and writes the
// total of the exponentials to *sum (rr2_lut64_p2 scheme, 4 floats per
// iteration).  exp() is approximated as 2^n * 2^(k/64) * (1 + p(r)) using a
// 64-entry table plus a degree-2 polynomial correction.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the input maximum and the expminus constants to all lanes.
  const float32x4_t vmax = vld1q_dup_f32(max);
  const float32x4_t vscale_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vrounding_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vmask6 = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vneg_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vneg_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vcoeff2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vcutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  float32x4_t vsum4 = vmovq_n_f32(0.0f);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vin = vld1q_f32(input); input += 4;
    const float32x4_t vdelta = vsubq_f32(vin, vmax);
    // Round delta*log2(e) to a 6-fractional-bit fixed point via the bias.
    float32x4_t vround = vmlaq_f32(vrounding_bias, vdelta, vscale_log2e);
    // Clear the 6 index bits and align the integer part with the float
    // exponent field.
    const int32x4_t vexp_bits = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vround), vmovq_n_s32(INT32_C(0x3F))), 17);
    // Low 6 bits of each lane index the 2^(k/64) table; pull them out as
    // two 64-bit lanes carrying two 32-bit indices each.
    const uint64x2_t vindices = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vround), vmask6));
    const uint64_t vindex01 = vgetq_lane_u64(vindices, 0);
    const uint64_t vindex23 = vgetq_lane_u64(vindices, 1);
    float32x2_t vtab01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vindex01]);
    float32x2_t vtab23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vindex23]);
    vtab01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vindex01 >> 32)], vtab01, 1);
    vtab23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vindex23 >> 32)], vtab23, 1);
    const float32x4_t vtab = vcombine_f32(vtab01, vtab23);
    // Scale s = 2^n * 2^(k/64): add exponent bits onto the table value.
    const float32x4_t vscale = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vtab), vexp_bits));
    vround = vsubq_f32(vround, vrounding_bias);
    // Two-step reduction r = delta - n*ln(2) using the hi/lo split of ln(2).
    float32x4_t vrem = vmlaq_f32(vdelta, vround, vneg_ln2_hi);
    vrem = vmlaq_f32(vrem, vround, vneg_ln2_lo);
    // p = r + c2*r^2, then f = s + s*p.
    float32x4_t vpoly = vmulq_f32(vrem, vcoeff2);
    vpoly = vmlaq_f32(vrem, vrem, vpoly);
    float32x4_t vexp = vmlaq_f32(vscale, vscale, vpoly);
    // Zero lanes whose reduced input is below the denormal cutoff.
    vexp = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vexp), vcltq_f32(vdelta, vcutoff)));
    vst1q_f32(output, vexp); output += 4;
    vsum4 = vaddq_f32(vsum4, vexp);
  }
  // Collapse the 4-lane accumulator.
#if XNN_ARCH_ARM64
  float vsum_lo = vaddvq_f32(vsum4);
#else
  float32x2_t vsum_lo = vadd_f32(vget_high_f32(vsum4), vget_low_f32(vsum4));
#endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Over-reading load for the 1-3 leftover elements (XNN_OOB_READS);
    // only the valid lanes are stored and summed below.
    const float32x4_t vin = vld1q_f32(input); input += 4;
    const float32x4_t vdelta = vsubq_f32(vin, vmax);
    float32x4_t vround = vmlaq_f32(vrounding_bias, vdelta, vscale_log2e);
    const int32x4_t vexp_bits = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vround), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vindices = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vround), vmask6));
    const uint64_t vindex01 = vgetq_lane_u64(vindices, 0);
    const uint64_t vindex23 = vgetq_lane_u64(vindices, 1);
    float32x2_t vtab01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vindex01]);
    float32x2_t vtab23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vindex23]);
    vtab01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vindex01 >> 32)], vtab01, 1);
    vtab23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vindex23 >> 32)], vtab23, 1);
    const float32x4_t vtab = vcombine_f32(vtab01, vtab23);
    const float32x4_t vscale = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vtab), vexp_bits));
    vround = vsubq_f32(vround, vrounding_bias);
    float32x4_t vrem = vmlaq_f32(vdelta, vround, vneg_ln2_hi);
    vrem = vmlaq_f32(vrem, vround, vneg_ln2_lo);
    float32x4_t vpoly = vmulq_f32(vrem, vcoeff2);
    vpoly = vmlaq_f32(vrem, vrem, vpoly);
    float32x4_t vexp = vmlaq_f32(vscale, vscale, vpoly);
    vexp = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vexp), vcltq_f32(vdelta, vcutoff)));
    float32x2_t vexp_lo = vget_low_f32(vexp);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vexp_lo); output += 2;
      #if XNN_ARCH_ARM64
        vsum_lo += vaddv_f32(vexp_lo);
      #else
        vsum_lo = vadd_f32(vsum_lo, vexp_lo);
      #endif
      vexp_lo = vget_high_f32(vexp);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vexp_lo, 0);
      #if XNN_ARCH_ARM64
        vsum_lo += vget_lane_f32(vexp_lo, 0);
      #else
        // Move the valid lane into lane 1 (zeroing lane 0) so the final
        // pairwise add counts it exactly once.
        vsum_lo = vadd_f32(vsum_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vexp_lo), 32)));
      #endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vsum_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vsum_lo, vsum_lo), 0);
#endif
}
| 5,326 | 35.486301 | 107 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) over `batch` floats and writes
// the sum of the exponentials to *sum (rr2_lut64_p2 scheme, 8 elements per
// main-loop iteration, two partial-sum accumulators).
//
// Fix: the second vector of each main-loop iteration now accumulates into
// vacc1 instead of vacc0.  Previously vacc1 was initialized and merged but
// never updated, so the "_acc2" variant degenerated into a single dependent
// add chain.  Per-element outputs are unchanged; only the association order
// of the floating-point summation differs.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x8_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the maximum and the approximation constants to all lanes.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  // Two independent accumulators break the add dependency chain.
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;

    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);

    // n := round(x*log2(e)*64)/64 via the magic-bias trick.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);

    // Exponent increment: drop the 6 table-index bits, shift into the float
    // exponent field.
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Low 6 bits index the 64-entry 2^(k/64) table.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);

    // s = 2^n * 2^(k/64): add the exponent bits to the table value.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));

    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);

    // Two-step reduction r = x - n*ln(2) with the hi/lo split of ln(2).
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);

    // p = r + c2*r^2, then f = s + s*p.
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);

    // Flush lanes below the denormal cutoff to zero.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;

    // Split accumulation across the two accumulators (the actual _acc2 fix).
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
  }
  // Merge the partial sums.
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
  // Tail loop: one vector at a time; same algorithm as the main loop.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Over-reading load for the 1-3 leftover elements (XNN_OOB_READS);
    // only the valid lanes are stored and summed below.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the valid lane into lane 1 (zeroing lane 0) so the final
        // pairwise add counts it exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,620 | 39.665094 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-lut64-p2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) over `batch` floats and writes
// the sum of all the exponentials to *sum.  Main loop handles 8 elements
// (2 NEON vectors) per iteration.
//
// exp(x) is approximated as 2^n * 2^(k/64) * (1 + p(r)): n and the 6-bit
// table index k come from rounding x*log2(e) with the magic-bias trick,
// 2^(k/64) is read from the 64-entry table xnn_table_exp2_k_over_64, and p
// is a degree-2 polynomial in the reduced argument r.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_lut64_p2_x8(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the maximum and the approximation constants to all lanes.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  // -ln(2) split into high/low halves for the two-step ("rr2") reduction.
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 inputs and subtract the reference maximum.
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;

    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);

    // n := round(x*log2(e)*64)/64, encoded in the mantissa via magic bias.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);

    // Exponent increment: clear the 6 index bits, shift into the float
    // exponent field.
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Low 6 bits of each lane index the 2^(k/64) table.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    // Gather table values, two lanes per 64-bit index.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);

    // s = 2^n * 2^(k/64): add the exponent bits to the table value bits.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));

    // Recover real-valued n by removing the magic bias.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);

    // Reduced argument r = x - n*ln(2), two-step (hi then lo) reduction.
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);

    // p = r + c2*r^2, then f = s + s*p = s*(1+p).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    vp0123 = vmlaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vmlaq_f32(vt4567, vt4567, vp4567);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vs4567, vp4567);

    // Flush lanes whose input was below the denormal cutoff to zero.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));

    // Store the exponentials and accumulate their sum.
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
  }
  float32x4_t vacc = vacc0;
  // Tail loop: one vector (4 floats) at a time; same algorithm as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the vector accumulator.
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full 4-lane load for 1-3 remaining elements; over-read is permitted
    // (function is annotated XNN_OOB_READS), only valid lanes are stored.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlaq_f32(vt, vt, vp);
    float32x4_t vf = vmlaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      // Store/accumulate two elements, then advance to the upper half.
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the single valid lane into lane 1 (zeroing lane 0) so the
        // final pairwise add below counts it exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,539 | 39.666667 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x12-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max) for every element, streams f[i] to
// output, and stores the scalar sum of all f[i] to *sum — the shifted
// exponentiation + reduction pass of a numerically stable softmax.
//
// NEON microkernel: 12 floats per main-loop iteration with 2 partial vector
// accumulators (shortens the floating-point add dependency chain).  exp() is
// evaluated via the decomposition x = n*ln2 + t with a two-word ("rr2")
// Cody-Waite reduction and a degree-5 ("p5") polynomial on t.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x12_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;

    // x = i - max <= 0, so exp(x) is in (0, 1] and cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);

    // n := round(x / ln2) via the magic-bias trick: adding the large bias
    // forces the integer part of x*log2e into the low mantissa bits.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);

    // s := 2**n, constructed by shifting n's low bits into the exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));

    // Undo the magic bias to recover n as a float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);

    // t := x - n*ln2, with ln2 split into hi/lo words (Cody-Waite) so the
    // reduction stays accurate over the whole input range.
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);

    // Degree-5 polynomial evaluated by Horner's scheme.
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);

    // f := s + (t*s) * p(t) = s * exp(t) = exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);

    // Flush outputs to zero for inputs below the single-precision cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;

    // Distribute the partial sums over both accumulators; funneling all three
    // into vacc0 would serialize the adds and leave vacc1 permanently unused.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
  }
  // Fold the partial accumulators together before the per-vector tail loop.
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // A full vector is loaded even for a 1..3-element tail (XNN_OOB_READS
    // permits reading past the end); only valid lanes are stored/accumulated.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the single valid lane into the upper half so the final
        // pairwise add below picks it up exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 7,178 | 34.539604 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max) for every element, writes f[i] to
// output, and stores the scalar sum of all f[i] to *sum (stable-softmax
// exponentiation + reduction pass).
//
// NEON microkernel: 12 floats per main-loop iteration with 3 partial vector
// accumulators.  exp() uses x = n*ln2 + t with a two-word Cody-Waite
// reduction ("rr2") and a degree-5 polynomial ("p5") on t.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x12_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  float32x4_t vacc2 = vmovq_n_f32(0.0f);
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;

    // x = i - max <= 0, so exp(x) is in (0, 1] and cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);

    // n := round(x / ln2) via the magic-bias trick.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);

    // s := 2**n, built by shifting n's low bits into the exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));

    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);

    // t := x - n*ln2 with hi/lo split of ln2 (Cody-Waite).
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);

    // Degree-5 polynomial evaluated by Horner's scheme.
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);

    // f := s + (t*s) * p(t) = s * exp(t) = exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);

    // Flush outputs to zero for inputs below the single-precision cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;

    // One partial sum per accumulator keeps the three adds independent.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc2 = vaddq_f32(vacc2, vf89AB);
  }
  // Fold all partial accumulators together before the per-vector tail loop.
  vacc0 = vaddq_f32(vacc0, vacc1);
  vacc0 = vaddq_f32(vacc0, vacc2);

  float32x4_t vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load for a 1..3-element tail is allowed by XNN_OOB_READS;
    // only the valid lanes are stored and accumulated below.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the single valid lane into the upper half so the final
        // pairwise add below picks it up exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
}
| 7,254 | 34.563725 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max) for every element, writes f[i] to
// output, and stores the scalar sum of all f[i] to *sum (stable-softmax
// exponentiation + reduction pass).
//
// NEON microkernel: 12 floats per main-loop iteration, single vector
// accumulator.  exp() uses x = n*ln2 + t with a two-word Cody-Waite
// reduction ("rr2") and a degree-5 polynomial ("p5") on t.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x12(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;

    // x = i - max <= 0, so exp(x) is in (0, 1] and cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);

    // n := round(x / ln2) via the magic-bias trick.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);

    // s := 2**n, built by shifting n's low bits into the exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));

    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);

    // t := x - n*ln2 with hi/lo split of ln2 (Cody-Waite).
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);

    // Degree-5 polynomial evaluated by Horner's scheme.
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);

    // f := s + (t*s) * p(t) = s * exp(t) = exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);

    // Flush outputs to zero for inputs below the single-precision cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;

    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
  }
  float32x4_t vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load for a 1..3-element tail is allowed by XNN_OOB_READS;
    // only the valid lanes are stored and accumulated below.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the single valid lane into the upper half so the final
        // pairwise add below picks it up exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 7,097 | 34.49 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] = exp(input[i] - *max) for every element, writes f[i] to
// output, and stores the scalar sum of all f[i] to *sum (stable-softmax
// exponentiation + reduction pass).
//
// NEON microkernel: 16 floats per main-loop iteration with 2 partial vector
// accumulators.  exp() uses x = n*ln2 + t with a two-word Cody-Waite
// reduction ("rr2") and a degree-5 polynomial ("p5") on t.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x16_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;

    // x = i - max <= 0, so exp(x) is in (0, 1] and cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);

    // n := round(x / ln2) via the magic-bias trick.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);

    // s := 2**n, built by shifting n's low bits into the exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));

    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);

    // t := x - n*ln2 with hi/lo split of ln2 (Cody-Waite).
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);

    // Degree-5 polynomial evaluated by Horner's scheme.
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
    float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);

    // f := s + (t*s) * p(t) = s * exp(t) = exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);

    // Flush outputs to zero for inputs below the single-precision cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;

    // Alternate partial sums between the two accumulators; funneling all four
    // into vacc0 would serialize the adds and leave vacc1 permanently unused.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc1 = vaddq_f32(vacc1, vfCDEF);
  }
  // Fold the partial accumulators together before the per-vector tail loop.
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load for a 1..3-element tail is allowed by XNN_OOB_READS;
    // only the valid lanes are stored and accumulated below.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift the single valid lane into the upper half so the final
        // pairwise add below picks it up exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 8,115 | 36.229358 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x16_acc4(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_p5.magic_bias);
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_p5.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_p5.minus_ln2_lo);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_rr2_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_rr2_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neon_rr2_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vmlaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vmlaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,267 | 36.243243 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element and stores the
// scalar sum of all outputs to *sum (the building block of a numerically
// stable softmax). NEON variant: two-step Cody-Waite range reduction ("rr2")
// plus a degree-5 polynomial ("p5"), processing 16 elements per main-loop
// iteration ("x16").
//
//   batch  - size of the input in BYTES; non-zero, multiple of sizeof(float)
//   input  - input elements
//   max    - pointer to the precomputed maximum of the input (subtracting it
//            keeps exp() arguments <= 0 and avoids overflow)
//   output - receives exp(input[i] - *max)
//   sum    - receives the sum of all output elements
//   params - scalar constants for the neon_rr2_p5 approximation
//
// XNN_OOB_READS: the remainder path below loads a full 4-lane vector even
// when 1-3 elements remain, so reads past the end of `input` must be legal.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x16(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the input maximum and all approximation constants to 4 lanes.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  // log2(e): exp(x) is evaluated as 2**(x * log2e).
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  // "Magic bias": adding it rounds x*log2e to the nearest integer in the low
  // mantissa bits; subtracting it back recovers that integer as a float.
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  // -ln(2) split into high/low parts: the two-step subtraction keeps the
  // reduced argument t accurate (Cody-Waite reduction).
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  // Degree-5 polynomial coefficients (c1..c5) for exp(t) on the reduced range.
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  // Inputs below this cutoff have their result flushed to zero (see vbicq below).
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  // Running vector sum of the outputs.
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  // Main loop: 16 elements (4 quads) per iteration; `batch` counts bytes.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;

    // x = input - max  (so x <= 0 for in-range inputs).
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);

    // n = round(x * log2e), via the magic-bias addition.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);

    // s = 2**n, built by shifting the integer in the low mantissa bits of
    // (n + magic_bias) into the float exponent field (<< 23).
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));

    // Recover n as a plain float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);

    // t = x - n*ln2, computed in two steps (hi then lo) for extra precision.
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);

    // Horner evaluation of p(t) = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
    float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);

    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);

    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);

    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);

    // f = s + (t*s)*p(t)  ~=  s * exp(t)  =  exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);

    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);

    // Flush lanes with x < denorm_cutoff to zero: vclt makes an all-ones
    // mask for such lanes and vbic clears the corresponding result bits.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));

    // Store the exponentials and fold them into the running sum.
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;

    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc0 = vaddq_f32(vacc0, vfCDEF);
  }

  float32x4_t vacc = vacc0;
  // Secondary loop: one quad (4 elements) at a time; same math as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
// Reduce the 4-lane accumulator: ARM64 has a horizontal-add instruction;
// AArch32 folds the high pair onto the low pair and finishes with vpadd below.
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  // Remainder path: 1-3 leftover elements. A full quad is loaded (the
  // XNN_OOB_READS contract), but only `batch` bytes are stored/summed.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      // Two elements: store/sum the low pair, then shift the high pair down.
      vst1_f32(output, vf_lo); output += 2;

#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif

      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      // One element: store/sum lane 0 only.
      vst1_lane_f32(output, vf_lo, 0);

#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift lane 0 into lane 1 (zeroing lane 0) so only that one element
      // is added; the final vpadd below sums both lanes.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
// Write the final scalar sum.
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,034 | 36.199074 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x20-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) and stores the scalar sum of all
// outputs to *sum. NEON variant: two-step Cody-Waite range reduction ("rr2"),
// degree-5 polynomial ("p5"), 20 elements per main-loop iteration ("x20"),
// with two partial-sum accumulators declared ("acc2").
//
//   batch  - size of the input in BYTES; non-zero, multiple of sizeof(float)
//   input  - input elements
//   max    - pointer to the precomputed maximum of the input (subtracted for
//            numerical stability)
//   output - receives exp(input[i] - *max)
//   sum    - receives the sum of all output elements
//   params - scalar constants for the neon_rr2_p5 approximation
//
// XNN_OOB_READS: the remainder path loads a full 4-lane vector even when
// fewer than 4 elements remain, so reads past the end of `input` must be legal.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the input maximum and all approximation constants to 4 lanes.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  // log2(e): exp(x) is evaluated as 2**(x * log2e).
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  // "Magic bias" rounding constant for extracting n = round(x * log2e).
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  // -ln(2) split into high/low parts for the two-step reduction.
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  // Degree-5 polynomial coefficients (c1..c5).
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  // Inputs below this cutoff have their result flushed to zero.
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  // Two partial-sum accumulators; merged after the main loop.
  // NOTE(review): the main loop below adds every quad into vacc0 only, so
  // vacc1 stays zero — numerically harmless (the merge still yields the
  // correct sum), but the second accumulator is effectively unused; confirm
  // against the generator template if accumulator rotation was intended.
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  // Main loop: 20 elements (5 quads) per iteration; `batch` counts bytes.
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

    // x = input - max.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

    // n = round(x * log2e), via the magic-bias addition.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vxGHIJ, vlog2e);

    // s = 2**n: shift the biased integer into the float exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));

    // Recover n as a plain float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

    // t = x - n*ln2, in two steps (hi then lo) for extra precision.
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vxGHIJ, vnGHIJ, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);

    // Horner evaluation of p(t) = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
    float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
    float32x4_t vpGHIJ = vmlaq_f32(vc4, vc5, vtGHIJ);

    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);

    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);

    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc1, vpGHIJ, vtGHIJ);

    // f = s + (t*s)*p(t)  ~=  exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);

    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);
    float32x4_t vfGHIJ = vmlaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);

    // Flush lanes with x < denorm_cutoff to zero via mask-and-clear.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));

    // Store the exponentials and fold them into the running sum.
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;

    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc0 = vaddq_f32(vacc0, vfCDEF);
    vacc0 = vaddq_f32(vacc0, vfGHIJ);
  }
  // Merge the partial accumulators (vacc1 is zero here; see NOTE above).
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
  // Secondary loop: one quad (4 elements) at a time; same math as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
// Reduce the 4-lane accumulator: ARM64 horizontal add, or AArch32 pairwise fold.
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  // Remainder path: 1-3 leftover elements. Loads a full quad (XNN_OOB_READS),
  // but only `batch` bytes are stored/summed.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      // Two elements: store/sum the low pair, then shift the high pair down.
      vst1_f32(output, vf_lo); output += 2;

#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif

      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      // One element: store/sum lane 0 only.
      vst1_lane_f32(output, vf_lo, 0);

#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift lane 0 into lane 1 (zeroing lane 0) so only that one element
      // is added; the final vpadd below sums both lanes.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
// Write the final scalar sum.
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 9,052 | 37.688034 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x20-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) and stores the scalar sum of all
// outputs to *sum. NEON variant: two-step Cody-Waite range reduction ("rr2"),
// degree-5 polynomial ("p5"), 20 elements per main-loop iteration ("x20"),
// with five independent partial-sum accumulators ("acc5") — one per quad —
// so the per-iteration sum updates have no data dependence on each other.
//
//   batch  - size of the input in BYTES; non-zero, multiple of sizeof(float)
//   input  - input elements
//   max    - pointer to the precomputed maximum of the input (subtracted for
//            numerical stability)
//   output - receives exp(input[i] - *max)
//   sum    - receives the sum of all output elements
//   params - scalar constants for the neon_rr2_p5 approximation
//
// XNN_OOB_READS: the remainder path loads a full 4-lane vector even when
// fewer than 4 elements remain, so reads past the end of `input` must be legal.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20_acc5(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the input maximum and all approximation constants to 4 lanes.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  // log2(e): exp(x) is evaluated as 2**(x * log2e).
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  // "Magic bias" rounding constant for extracting n = round(x * log2e).
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  // -ln(2) split into high/low parts for the two-step reduction.
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  // Degree-5 polynomial coefficients (c1..c5).
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  // Inputs below this cutoff have their result flushed to zero.
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  // Five partial-sum accumulators, merged pairwise after the main loop.
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  float32x4_t vacc2 = vmovq_n_f32(0.0f);
  float32x4_t vacc3 = vmovq_n_f32(0.0f);
  float32x4_t vacc4 = vmovq_n_f32(0.0f);
  // Main loop: 20 elements (5 quads) per iteration; `batch` counts bytes.
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

    // x = input - max.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

    // n = round(x * log2e), via the magic-bias addition.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vxGHIJ, vlog2e);

    // s = 2**n: shift the biased integer into the float exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));

    // Recover n as a plain float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);

    // t = x - n*ln2, in two steps (hi then lo) for extra precision.
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
    float32x4_t vtGHIJ = vmlaq_f32(vxGHIJ, vnGHIJ, vminus_ln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
    vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);

    // Horner evaluation of p(t) = c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
    float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
    float32x4_t vpGHIJ = vmlaq_f32(vc4, vc5, vtGHIJ);

    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);

    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);

    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
    vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);
    vpGHIJ = vmlaq_f32(vc1, vpGHIJ, vtGHIJ);

    // f = s + (t*s)*p(t)  ~=  exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
    vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);

    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
    float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);
    float32x4_t vfGHIJ = vmlaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);

    // Flush lanes with x < denorm_cutoff to zero via mask-and-clear.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));

    // Store the exponentials.
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;

    // Each quad goes to its own accumulator (order vacc0,4,3,2,1), keeping
    // the five adds independent.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc4 = vaddq_f32(vacc4, vf4567);
    vacc3 = vaddq_f32(vacc3, vf89AB);
    vacc2 = vaddq_f32(vacc2, vfCDEF);
    vacc1 = vaddq_f32(vacc1, vfGHIJ);
  }
  // Merge the five partial accumulators.
  vacc0 = vaddq_f32(vacc0, vacc1);
  vacc2 = vaddq_f32(vacc2, vacc3);
  vacc0 = vaddq_f32(vacc0, vacc2);
  vacc0 = vaddq_f32(vacc0, vacc4);

  float32x4_t vacc = vacc0;
  // Secondary loop: one quad (4 elements) at a time; same math as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
// Reduce the 4-lane accumulator: ARM64 horizontal add, or AArch32 pairwise fold.
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  // Remainder path: 1-3 leftover elements. Loads a full quad (XNN_OOB_READS),
  // but only `batch` bytes are stored/summed.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      // Two elements: store/sum the low pair, then shift the high pair down.
      vst1_f32(output, vf_lo); output += 2;

#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif

      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      // One element: store/sum lane 0 only.
      vst1_lane_f32(output, vf_lo, 0);

#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift lane 0 into lane 1 (zeroing lane 0) so only that one element
      // is added; the final vpadd below sums both lanes.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
// Write the final scalar sum.
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 9,280 | 37.670833 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x20(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_p5.magic_bias);
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_p5.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_p5.minus_ln2_lo);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_rr2_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_rr2_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neon_rr2_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neon_rr2_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t viGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vxCDEF, vlog2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vxGHIJ, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vx89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vxCDEF, vnCDEF, vminus_ln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vxGHIJ, vnGHIJ, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vmlaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
vp89AB = vmlaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
float32x4_t vf89AB = vmlaq_f32(vs89AB, vp89AB, vt89AB);
float32x4_t vfCDEF = vmlaq_f32(vsCDEF, vpCDEF, vtCDEF);
float32x4_t vfGHIJ = vmlaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
vacc0 = vaddq_f32(vacc0, vfGHIJ);
}
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vmlaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmlaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vmlaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,971 | 37.672414 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element and stores the
// running sum of all results to *sum (fuses the two softmax reduction passes).
// NEON microkernel: 4 floats per main-loop iteration; exp is approximated by a
// degree-5 polynomial after a two-constant (hi/lo) Cody-Waite range reduction.
//
// batch is the input size in BYTES (asserted to be a multiple of sizeof(float)).
// XNN_OOB_READS: the remainder path loads a full 4-lane vector, so the kernel
// may read (but never writes) past the end of the input buffer.
//
// Fix vs. prior revision: every `&params` had been mangled into `¶ms`
// (mis-decoded `&para;` HTML entity), which does not compile; restored.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the subtrahend (*max) and all reduction/polynomial constants.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  float32x4_t vacc = vmovq_n_f32(0.0f);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    // x := i - max, so exp(x) <= 1 and the accumulated sum cannot overflow.
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    // n := round(x * log2(e)) via the magic-bias trick: after the add, the
    // rounded integer sits in the low mantissa bits of vn.
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    // s := 2**n, built by shifting n's integer bits into the exponent field.
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    // t := x - n*ln2, with ln2 split into hi + lo parts for extra precision.
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    // Horner: p := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    // f := s + (s*t)*p == s*(1 + t*p) ~= exp(x).
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    // Zero out lanes with x below the cutoff (flushes would-be denormals).
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);  // horizontal sum of all 4 lanes
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    // 1..3 floats remain: evaluate a full 4-lane vector (may read past the end
    // of input - see XNN_OOB_READS) but store/accumulate only the valid lanes.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift lane 0 into lane 1 (zeroing lane 0) so only the one valid lane
      // contributes to the pairwise reduction below.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 4,167 | 30.338346 | 103 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) and accumulates the sum of all
// results into *sum. NEON microkernel: 8 floats per main-loop iteration,
// degree-5 polynomial exp approximation with a two-constant (hi/lo) range
// reduction; declares two partial-sum accumulators (see NOTE in the loop).
//
// batch is the input size in BYTES (multiple of sizeof(float)).
// XNN_OOB_READS: the remainder path may read past the end of input.
//
// Fix vs. prior revision: every `&params` had been mangled into `¶ms`
// (mis-decoded `&para;` HTML entity), which does not compile; restored.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x8_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the subtrahend (*max) and all reduction/polynomial constants.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    // x := i - max (keeps exp(x) <= 1 so the sum cannot overflow).
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    // n := round(x * log2(e)) via magic bias; s := 2**n from n's integer bits
    // shifted into the float exponent field.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    // t := x - n*ln2 (ln2 split into hi + lo parts for precision).
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    // Horner: p := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    // f := s + (s*t)*p ~= exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    // Zero out lanes below the denormal cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    // NOTE(review): both partial sums feed vacc0, so vacc1 stays zero until
    // the fold below - the second accumulator is effectively unused as
    // generated. Left as-is to preserve the exact summation order.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
  }
  // Fold the (zero) second accumulator into the first.
  vacc0 = vaddq_f32(vacc0, vacc1);
  float32x4_t vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Same math as above, one vector at a time.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);  // horizontal sum of all 4 lanes
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    // 1..3 floats remain: compute a full vector (may read out of bounds - see
    // XNN_OOB_READS) but store/accumulate only the valid lanes.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift lane 0 into lane 1 (zeroing lane 0) so only the one valid lane
      // contributes to the pairwise reduction below.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 6,238 | 32.543011 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neon-rr2-p5-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) and accumulates the sum of all
// results into *sum. NEON microkernel: 8 floats per main-loop iteration,
// single accumulator, degree-5 polynomial exp approximation with a
// two-constant (hi/lo) Cody-Waite range reduction.
//
// batch is the input size in BYTES (multiple of sizeof(float)).
// XNN_OOB_READS: the remainder path may read past the end of input.
//
// Fix vs. prior revision: every `&params` had been mangled into `¶ms`
// (mis-decoded `&para;` HTML entity), which does not compile; restored.
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x8(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the subtrahend (*max) and all reduction/polynomial constants.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    // x := i - max (keeps exp(x) <= 1 so the sum cannot overflow).
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    // n := round(x * log2(e)) via magic bias; s := 2**n from n's integer bits
    // shifted into the float exponent field.
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vx4567, vlog2e);
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    // t := x - n*ln2 (ln2 split into hi + lo parts for precision).
    float32x4_t vt0123 = vmlaq_f32(vx0123, vn0123, vminus_ln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vx4567, vn4567, vminus_ln2_hi);
    vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
    // Horner: p := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    float32x4_t vp0123 = vmlaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);
    vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    vp0123 = vmlaq_f32(vc1, vp0123, vt0123);
    vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    // f := s + (s*t)*p ~= exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    float32x4_t vf0123 = vmlaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);
    // Zero out lanes below the denormal cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
  }
  float32x4_t vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Same math as above, one vector at a time.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);  // horizontal sum of all 4 lanes
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    // 1..3 floats remain: compute a full vector (may read out of bounds - see
    // XNN_OOB_READS) but store/accumulate only the valid lanes.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift lane 0 into lane 1 (zeroing lane 0) so only the one valid lane
      // contributes to the pairwise reduction below.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 6,157 | 32.467391 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x12-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) and accumulates the sum of all
// results into *sum. NEON+FMA microkernel: 12 floats per main-loop iteration.
// exp is evaluated with a 64-entry lookup table: the low 6 bits of the scaled
// exponent index xnn_table_exp2_k_over_64 (2**(k/64) values), the remaining
// integer bits become a power-of-two scale, and only a degree-2 polynomial is
// needed for the residual.
//
// batch is the input size in BYTES (multiple of sizeof(float)).
// XNN_OOB_READS: the remainder path may read past the end of input.
//
// Fix vs. prior revision: every `&params` had been mangled into `¶ms`
// (mis-decoded `&para;` HTML entity), which does not compile; restored.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x12_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the subtrahend (*max) and constants. The 6-bit index mask and
  // the 17-bit shift below imply vn carries 6 fractional exponent bits, i.e.
  // log2e here is presumably log2(e) scaled by 64 and minus_ln2 is -ln2/64 -
  // TODO(review): confirm against the params initialization.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    // x := i - max (keeps exp(x) <= 1 so the sum cannot overflow).
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    // Magic-bias rounding: the scaled exponent ends up in vn's low mantissa
    // bits (6 fractional bits for the table index + integer bits above).
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    // e: clear the 6 index bits and shift left by 17 so the integer part
    // (which starts at bit 6) lands in the float exponent field (bit 23).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    // Table gather: extract each vector's indices as two 64-bit lane pairs,
    // then do scalar-indexed loads (NEON has no gather instruction).
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    // s := table-value * 2**e, applied by integer-adding e into the exponent.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    // Residual t, then degree-2 polynomial p := t + c2*t**2.
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
    // f := s + s*p == s*(1 + p) ~= exp(x).
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    // Zero out lanes below the denormal cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    // NOTE(review): all three partial sums feed vacc0; vacc1 stays zero until
    // the fold below - the second accumulator is effectively unused as
    // generated. Left as-is to preserve the exact summation order.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
  }
  // Fold the (zero) second accumulator into the first.
  vacc0 = vaddq_f32(vacc0, vacc1);
  float32x4_t vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Same LUT math as above, one vector at a time.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);  // horizontal sum of all 4 lanes
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    // 1..3 floats remain: compute a full vector (may read out of bounds - see
    // XNN_OOB_READS) but store/accumulate only the valid lanes.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift lane 0 into lane 1 (zeroing lane 0) so only the one valid lane
      // contributes to the pairwise reduction below.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 9,824 | 42.281938 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) for batch/sizeof(float) elements
// and accumulates the sum of all outputs into *sum.  Outputs whose reduced
// argument x falls below denorm_cutoff are flushed to zero.  exp() is
// evaluated via a 64-entry exp2 lookup table plus a degree-2 polynomial
// ("rr1_lut64_p2").  The main loop consumes 12 floats per iteration and keeps
// 3 partial accumulators (acc3) to shorten the FP-add dependency chain.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x12_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
  // Three independent partial sums for the 12-wide main loop.
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  float32x4_t vacc2 = vmovq_n_f32(0.0f);
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    // x := i - max, so exp(x) <= 1 and the sum cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    // Magic-bias trick: after this FMA the rounded integer n = round(x * log2e)
    // sits in the low mantissa bits of vn (reinterpreted as int32 below).
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    // e := n with its low 6 bits cleared, shifted into float-exponent position
    // (left shift by 17 moves bit 6 to bit 23).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    // Low 6 bits of n select the 2**(k/64) table entry; extract two 32-bit
    // indices at a time through 64-bit lanes.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    // Gather table entries: low index fills both lanes, high index overwrites lane 1.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    // s := table value scaled by 2**(n >> 6), assembled by integer-adding the
    // pre-positioned exponent bits e to the bit pattern of l.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    // Undo the magic bias to recover n as a float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias)
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    // Argument reduction: t := x + n * minus_ln2.
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    // Degree-2 polynomial: p := t + c2*t*t, so s + s*p ~= s * exp(t).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    // Flush results to zero where x < denorm_cutoff (output would be subnormal).
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    // Rotate across the three accumulators.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc2 = vaddq_f32(vacc2, vf89AB);
  }
  // Fold the partial accumulators together before the tail loops.
  vacc0 = vaddq_f32(vacc0, vacc1);
  vacc0 = vaddq_f32(vacc0, vacc2);
  float32x4_t vacc = vacc0;
  // Tail: whole 4-float vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the running sum (scalar on AArch64, 2-lane pair otherwise).
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  // Tail: 1-3 leftover floats; relies on XNN_OOB_READS for the full-vector load.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // On ARM32: shift the valid lane into the high half so only one lane is summed.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  // Store the final scalar sum.
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 9,900 | 42.235808 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) for batch/sizeof(float) elements
// and accumulates the sum of all outputs into *sum.  Outputs whose reduced
// argument x falls below denorm_cutoff are flushed to zero.  exp() is
// evaluated via a 64-entry exp2 lookup table plus a degree-2 polynomial
// ("rr1_lut64_p2").  The main loop consumes 12 floats per iteration with a
// single accumulator (contrast with the _acc3 variant of this kernel).
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x12(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    // x := i - max, so exp(x) <= 1 and the sum cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    // Magic-bias trick: after this FMA the rounded integer n = round(x * log2e)
    // sits in the low mantissa bits of vn (reinterpreted as int32 below).
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    // e := n with its low 6 bits cleared, shifted into float-exponent position
    // (left shift by 17 moves bit 6 to bit 23).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    // Low 6 bits of n select the 2**(k/64) table entry; extract two 32-bit
    // indices at a time through 64-bit lanes.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    // Gather table entries: low index fills both lanes, high index overwrites lane 1.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    // s := table value scaled by 2**(n >> 6), assembled by integer-adding the
    // pre-positioned exponent bits e to the bit pattern of l.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    // Undo the magic bias to recover n as a float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    // Argument reduction: t := x + n * minus_ln2.
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    // Degree-2 polynomial: p := t + c2*t*t, so s + s*p ~= s * exp(t).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    // Flush results to zero where x < denorm_cutoff (output would be subnormal).
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)))
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    // Single-accumulator variant: all partial sums go into vacc0.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
  }
  float32x4_t vacc = vacc0;
  // Tail: whole 4-float vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the running sum (scalar on AArch64, 2-lane pair otherwise).
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  // Tail: 1-3 leftover floats; relies on XNN_OOB_READS for the full-vector load.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // On ARM32: shift the valid lane into the high half so only one lane is summed.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  // Store the final scalar sum.
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 9,743 | 42.306667 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) for batch/sizeof(float) elements
// and accumulates the sum of all outputs into *sum.  Outputs whose reduced
// argument x falls below denorm_cutoff are flushed to zero.  exp() is
// evaluated via a 64-entry exp2 lookup table plus a degree-2 polynomial
// ("rr1_lut64_p2").  The main loop consumes 16 floats per iteration and keeps
// 2 partial accumulators (acc2) to shorten the FP-add dependency chain.
//
// Fix: the main loop previously added all four result vectors into vacc0,
// leaving vacc1 permanently zero.  It now alternates vacc0/vacc1, matching
// the acc2 design (and the accumulator rotation used by the _x12_acc3 sibling
// kernel in this family).
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x16_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
  // Two independent partial sums for the 16-wide main loop.
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    // x := i - max, so exp(x) <= 1 and the sum cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    // Magic-bias trick: after this FMA the rounded integer n = round(x * log2e)
    // sits in the low mantissa bits of vn (reinterpreted as int32 below).
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
    // e := n with its low 6 bits cleared, shifted into float-exponent position
    // (left shift by 17 moves bit 6 to bit 23).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
    // Low 6 bits of n select the 2**(k/64) table entry; extract two 32-bit
    // indices at a time through 64-bit lanes.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    // Gather table entries: low index fills both lanes, high index overwrites lane 1.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    // s := table value scaled by 2**(n >> 6), assembled by integer-adding the
    // pre-positioned exponent bits e to the bit pattern of l.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    // Undo the magic bias to recover n as a float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    // Argument reduction: t := x + n * minus_ln2.
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
    // Degree-2 polynomial: p := t + c2*t*t, so s + s*p ~= s * exp(t).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vfmaq_f32(vtCDEF, vtCDEF, vpCDEF);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
    // Flush results to zero where x < denorm_cutoff (output would be subnormal).
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    // Alternate across both accumulators (this is the point of the acc2 variant).
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc1 = vaddq_f32(vacc1, vfCDEF);
  }
  // Fold the partial accumulators together before the tail loops.
  vacc0 = vaddq_f32(vacc0, vacc1);
  float32x4_t vacc = vacc0;
  // Tail: whole 4-float vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the running sum (scalar on AArch64, 2-lane pair otherwise).
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  // Tail: 1-3 leftover floats; relies on XNN_OOB_READS for the full-vector load.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // On ARM32: shift the valid lane into the high half so only one lane is summed.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  // Store the final scalar sum.
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 11,314 | 44.625 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x16_acc4(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
vpCDEF = vfmaq_f32(vtCDEF, vtCDEF, vpCDEF);
float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmaq_f32(vt, vt, vp);
float32x4_t vf = vfmaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmaq_f32(vt, vt, vp);
float32x4_t vf = vfmaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 11,466 | 44.503968 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// f32 raddstoreexpminusmax microkernel: NEON+FMA, single-step range reduction
// (rr1), 64-entry exp2 lookup table, degree-2 polynomial, 16 elements per
// main-loop iteration.
//
// For every element computes f = exp(input[i] - *max), stores f to output[i],
// and writes the sum of all f values to *sum — the exponentiation + reduction
// pass of a numerically stable softmax.
//
// Fix vs. previous revision: the parameter loads read `¶ms->...` (an
// HTML-entity corruption of `&params->...`), which does not compile; restored
// the address-of expressions. All computation is otherwise unchanged — the
// exact FMA/rounding order is semantically load-bearing in this kernel.
//
// Marked XNN_OOB_READS: the remainder path loads a full 4-element vector even
// when only 1-3 elements are left, so it may read (but never write) past the
// end of the input buffer.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x16(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  // Low 6 bits of the rounded, scaled exponent select one of 64 LUT entries.
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  // Main loop: 16 elements (4 vectors) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;

    // Subtract the precomputed maximum: x = i - max, so x <= 0 and exp(x) <= 1.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);

    // n = round(x * log2e) via the magic-bias trick: after this FMA the
    // rounded integer sits in the low mantissa bits of vn.
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);

    // Exponent adjustment: clear the 6 LUT-index bits, then shift the rest
    // left by 17 to land in the float exponent field (bit 23).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Extract LUT indices; each uint64 lane packs two 32-bit indices.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    // Table lookups: lane 0 of each index pair via dup-load, lane 1 (index in
    // the upper 32 bits of the packed pair) via a lane-1 load.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    // Reconstruct the scale s by adding the exponent bits to the LUT value's
    // bit pattern (integer add on float bits).
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    // Remove the magic bias to recover n as a float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias)
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    // Range-reduction remainder: t = x - n * ln2.
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
    // Degree-2 polynomial: p = t + c2*t^2, then f = s + s*p = s*(1 + t + c2*t^2).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vfmaq_f32(vtCDEF, vtCDEF, vpCDEF);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
    // Flush outputs to zero where x is below the denormal cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    // Store results and accumulate the running sum.
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc0 = vaddq_f32(vacc0, vfCDEF);
  }

  float32x4_t vacc = vacc0;
  // Tail loop: 4 elements at a time (same algorithm, one vector).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the vector accumulator.
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // 1-3 leftover elements: full 4-wide load (OOB read allowed), partial store.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift lane 0 into lane 1 (lane 0 becomes zero) so only the remaining
        // element contributes to the final pairwise add below.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  // Write out the final scalar sum.
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 11,233 | 44.666667 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x20-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// f32 raddstoreexpminusmax microkernel: NEON+FMA, single-step range reduction
// (rr1), 64-entry exp2 lookup table, degree-2 polynomial, 20 elements per
// main-loop iteration, 2 partial accumulators.
//
// For every element computes f = exp(input[i] - *max), stores f to output[i],
// and writes the sum of all f values to *sum — the exponentiation + reduction
// pass of a numerically stable softmax.
//
// Fix vs. previous revision: the parameter loads read `¶ms->...` (an
// HTML-entity corruption of `&params->...`), which does not compile; restored
// the address-of expressions. All computation is otherwise unchanged — the
// exact FMA/rounding order is semantically load-bearing in this kernel.
//
// NOTE(review): the main loop accumulates every partial sum into vacc0 only;
// vacc1 stays zero until it is folded in after the loop. Harmless, but the
// second accumulator appears unused — presumably a generator artifact.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x20_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  // Low 6 bits of the rounded, scaled exponent select one of 64 LUT entries.
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F))
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  // Main loop: 20 elements (5 vectors) per iteration.
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;

    // Subtract the precomputed maximum: x = i - max, so x <= 0 and exp(x) <= 1.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);

    // n = round(x * log2e) via the magic-bias trick: after this FMA the
    // rounded integer sits in the low mantissa bits of vn.
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vxGHIJ, vlog2e);

    // Exponent adjustment: clear the 6 LUT-index bits, then shift the rest
    // left by 17 to land in the float exponent field (bit 23).
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veGHIJ = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnGHIJ), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Extract LUT indices; each uint64 lane packs two 32-bit indices.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
    // Table lookups: lane 0 of each index pair via dup-load, lane 1 (index in
    // the upper 32 bits of the packed pair) via a lane-1 load.
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
    // Reconstruct the scale s by adding the exponent bits to the LUT value's
    // bit pattern (integer add on float bits).
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
    // Remove the magic bias to recover n as a float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
    // Range-reduction remainder: t = x - n * ln2.
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
    float32x4_t vtGHIJ = vfmaq_f32(vxGHIJ, vnGHIJ, vminus_ln2);
    // Degree-2 polynomial: p = t + c2*t^2, then f = s + s*p = s*(1 + t + c2*t^2).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vfmaq_f32(vtCDEF, vtCDEF, vpCDEF);
    vpGHIJ = vfmaq_f32(vtGHIJ, vtGHIJ, vpGHIJ);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
    float32x4_t vfGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
    // Flush outputs to zero where x is below the denormal cutoff.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));
    // Store results and accumulate the running sum.
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc0 = vaddq_f32(vacc0, vfCDEF);
    vacc0 = vaddq_f32(vacc0, vfGHIJ);
  }
  // Fold the partial accumulators together.
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
  // Tail loop: 4 elements at a time (same algorithm, one vector).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the vector accumulator.
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // 1-3 leftover elements: full 4-wide load (OOB read allowed), partial store.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift lane 0 into lane 1 (lane 0 becomes zero) so only the remaining
        // element contributes to the final pairwise add below.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  // Write out the final scalar sum.
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 12,804 | 46.60223 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x20-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x20_acc5(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
float32x4_t vacc4 = vmovq_n_f32(0.0f);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t viGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vxGHIJ, vlog2e);
const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t veGHIJ = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnGHIJ), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxGH]);
float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);
vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
vlGH = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
float32x4_t vtGHIJ = vfmaq_f32(vxGHIJ, vnGHIJ, vminus_ln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
vpCDEF = vfmaq_f32(vtCDEF, vtCDEF, vpCDEF);
vpGHIJ = vfmaq_f32(vtGHIJ, vtGHIJ, vpGHIJ);
float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
float32x4_t vfGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc4 = vaddq_f32(vacc4, vf4567);
vacc3 = vaddq_f32(vacc3, vf89AB);
vacc2 = vaddq_f32(vacc2, vfCDEF);
vacc1 = vaddq_f32(vacc1, vfGHIJ);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
vacc0 = vaddq_f32(vacc0, vacc4);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmaq_f32(vt, vt, vp);
float32x4_t vf = vfmaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmaq_f32(vt, vt, vp);
float32x4_t vf = vfmaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 13,032 | 46.392727 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) for every element and accumulates
// the sum of all outputs into *sum (the core of a numerically stable softmax).
// Scheme: rr1 = one round of range reduction, lut64 = 64-entry table of
// 2^(k/64), p2 = degree-2 polynomial correction. Main loop processes 20 floats
// (5 NEON vectors) per iteration; XNN_OOB_READS marks that the remainder path
// loads a full 4-float vector and may read past the end of `input`.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x20(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the running maximum and the algorithm constants into vectors.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));  // low 6 bits select the LUT entry
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;
    const float32x4_t viGHIJ = vld1q_f32(input); input += 4;
    // x = input - max, so x <= 0 and exp(x) never overflows.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
    const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);
    // n = round(x * log2(e) * 64) / 64 via the magic-bias trick: adding the
    // large bias leaves n as a fixed-point number in the low mantissa bits.
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
    float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vxGHIJ, vlog2e);
    // e = integer part of n, moved into the float exponent field: clear the 6
    // fractional bits, then shift left 17 so bit 6 lands on exponent bit 23.
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn89AB), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnCDEF), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t veGHIJ = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vnGHIJ), vmovq_n_s32(INT32_C(0x3F))), 17);
    // LUT index = low 6 bits of n; pairs of 32-bit indices are extracted as
    // 64-bit lanes to cut the number of lane-extraction instructions.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask));
    const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
    const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
    // Gather 2^(k/64) table entries: low half of each 64-bit index pair first...
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxAB]);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxEF]);
    float32x2_t vlGH = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxGH]);
    float32x2_t vlIJ = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidxIJ]);
    // ...then the high half (index in the upper 32 bits) into lane 1.
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);
    vlGH = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxGH >> 32)], vlGH, 1);
    vlIJ = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidxIJ >> 32)], vlIJ, 1);
    const float32x4_t vlGHIJ = vcombine_f32(vlGH, vlIJ);
    // s = 2^n: add the integer exponent bits directly onto the table value.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));
    const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlGHIJ), veGHIJ));
    // Recover the real n by removing the magic bias.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
    vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
    // t = x - n*ln2, the reduced argument (single FMA: rr1).
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
    float32x4_t vtGHIJ = vfmaq_f32(vxGHIJ, vnGHIJ, vminus_ln2);
    // p = t + c2*t^2, degree-2 approximation of exp(t) - 1.
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);
    float32x4_t vpGHIJ = vmulq_f32(vtGHIJ, vc2);
    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    vp89AB = vfmaq_f32(vt89AB, vt89AB, vp89AB);
    vpCDEF = vfmaq_f32(vtCDEF, vtCDEF, vpCDEF);
    vpGHIJ = vfmaq_f32(vtGHIJ, vtGHIJ, vpGHIJ);
    // f = s + s*p = s*(1 + p) ~= exp(x).
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vs89AB, vp89AB);
    float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vsCDEF, vpCDEF);
    float32x4_t vfGHIJ = vfmaq_f32(vsGHIJ, vsGHIJ, vpGHIJ);
    // Flush results to zero where x < denorm_cutoff (vclt yields an all-ones
    // mask there; BIC clears those lanes).
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)))
;
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
    vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));
    // Store exponentials and fold them into the running sum.
    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;
    vst1q_f32(output, vfCDEF); output += 4;
    vst1q_f32(output, vfGHIJ); output += 4;
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
    vacc0 = vaddq_f32(vacc0, vfCDEF);
    vacc0 = vaddq_f32(vacc0, vfGHIJ);
  }
  float32x4_t vacc = vacc0;
  // 4-at-a-time loop for the remainder; identical math to the main loop.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction: AArch64 has a single-instruction across-vector add;
  // AArch32 reduces 4 lanes to 2 here and pairwise-adds at the end.
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Loads a full vector for 1-3 remaining floats (out-of-bounds read allowed
    // per XNN_OOB_READS); only the valid lanes are stored/accumulated below.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift the single valid float into the upper lane (zeroing the lower)
      // so the final pairwise add counts it exactly once.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 12,723 | 46.655431 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes output[i] = exp(input[i] - *max) and accumulates the sum of all
// outputs into *sum. Same rr1/lut64/p2 scheme as the wider variants, with a
// 4-float (one NEON vector) main loop. XNN_OOB_READS marks that the remainder
// path loads a full vector and may read past the end of `input`.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the running maximum and the algorithm constants into vectors.
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));  // low 6 bits select the LUT entry
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);
  float32x4_t vacc = vmovq_n_f32(0.0f);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    // x = input - max, so x <= 0 and exp(x) never overflows.
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    // n = round(x * log2(e) * 64) / 64 via the magic-bias trick; the low 6
    // mantissa bits hold the LUT index, the bits above hold the exponent.
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    // e = integer part of n shifted into the float exponent field (bit 23).
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    // Extract the four LUT indices as two 64-bit lanes and gather 2^(k/64).
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    // s = 2^n: add the integer exponent bits directly onto the table value.
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);  // recover the real n
    // t = x - n*ln2; p = t + c2*t^2; f = s*(1 + p) ~= exp(x).
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    // Flush to zero where x < denorm_cutoff (BIC with the comparison mask).
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)))
;
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction: single instruction on AArch64; AArch32 reduces to
  // 2 lanes here and pairwise-adds at the end.
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Loads a full vector for 1-3 remaining floats (out-of-bounds read allowed
    // per XNN_OOB_READS); only the valid lanes are stored/accumulated below.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
      vacc_lo += vaddv_f32(vf_lo);
#else
      vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
      vacc_lo += vget_lane_f32(vf_lo, 0);
#else
      // Shift the single valid float into the upper lane (zeroing the lower)
      // so the final pairwise add counts it exactly once.
      vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
    }
  }
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 5,154 | 35.048951 | 107 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x8_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.magic_bias);
const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.minus_ln2);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.c2);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut64_p2.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
}
vacc0 = vaddq_f32(vacc0, vacc1);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmaq_f32(vt, vt, vp);
float32x4_t vf = vfmaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vmulq_f32(vt, vc2);
vp = vfmaq_f32(vt, vt, vp);
float32x4_t vf = vfmaq_f32(vs, vs, vp);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,331 | 39.446602 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-lut64-p2-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
extern XNN_INTERNAL const float xnn_table_exp2_k_over_64[64];
// Computes exp(input[i] - *max) for every element of the batch, streams the
// results to `output`, and stores their scalar sum to `sum` — the shifted-exp
// and reduction passes of a numerically-stable softmax.
//
// Approximation scheme (rr1 + lut64 + p2): x is reduced via the magic-bias
// round-to-int trick so that x = n*(ln2/64) + t; 2**(n/64) is assembled from a
// 64-entry table (low 6 bits of n select the entry, the remaining bits become
// the float exponent), and exp(t) is refined with a degree-2 polynomial in t.
//
// Note: `¶ms`-style mojibake in the original dump is restored to `&params`.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x8(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));  // low 6 bits of n = table index
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  // Main loop: 8 floats (two 4-lane vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;

    // Subtract the running maximum so exp() cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);

    // n = round(x * 64/ln2), encoded in the low mantissa bits by the magic bias.
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);

    // Bits of n above the 6-bit table index become the exponent contribution.
    const int32x4_t ve0123 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn0123), vmovq_n_s32(INT32_C(0x3F))), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn4567), vmovq_n_s32(INT32_C(0x3F))), 17);

    // Gather 2**(frac(n)/64) from the 64-entry table, two lanes per 64-bit index pair.
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx23]);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx67]);
    vl01 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);

    // s = 2**n: add the shifted exponent bits to the table value's bit pattern.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));

    // Recover integer n, then the reduced argument t = x - n*(ln2/64).
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);

    // Degree-2 polynomial: p = t + t*(t*c2); then f = s + s*p ≈ s*exp(t).
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    vp0123 = vfmaq_f32(vt0123, vt0123, vp0123);
    vp4567 = vfmaq_f32(vt4567, vt4567, vp4567);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vs0123, vp0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vs4567, vp4567);

    // Force results for inputs below the denormal cutoff to exactly zero.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;

    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
  }

  float32x4_t vacc = vacc0;
  // Remainder loop: one 4-lane vector at a time (same algorithm as above).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)))
;
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the vector accumulator.
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    // Tail of 1-3 elements: loads a full vector past the end of the input
    // (permitted by XNN_OOB_READS), then stores/accumulates lane-by-lane.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const int32x4_t ve = vshlq_n_s32(vbicq_s32(vreinterpretq_s32_f32(vn), vmovq_n_s32(INT32_C(0x3F))), 17);
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmaq_f32(vt, vt, vp);
    float32x4_t vf = vfmaq_f32(vs, vs, vp);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift lane 0 into lane 1 (zeroing lane 0) so the final pairwise
        // add counts this element exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 8,250 | 39.446078 | 115 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x12-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes exp(input[i] - *max) for the whole batch, writes the results to
// `output`, and stores their scalar sum to `sum` (stable-softmax passes).
//
// Approximation scheme (rr1 + p5): x = n*ln2 + t via the magic-bias trick,
// 2**n is built by shifting n into the float exponent field, and exp(t) is a
// degree-5 polynomial (c1..c5). The "_acc2" variant keeps TWO partial-sum
// accumulators in the main loop to break the add dependency chain.
//
// FIX(review): the main loop previously added all three result vectors into
// vacc0, leaving vacc1 permanently zero and defeating the dual-accumulator
// design; results now alternate vacc0/vacc1/vacc0, matching the sibling
// _acc3 kernel (the combined sum is mathematically identical).
// Also restores `&params` from mojibake-mangled `¶ms`.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_p5.magic_bias);
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_p5.minus_ln2);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neonfma_rr1_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neonfma_rr1_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neonfma_rr1_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  // Main loop: 12 floats (three 4-lane vectors) per iteration.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;

    // Subtract the running maximum so exp() cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);

    // n = round(x / ln2) captured in the low mantissa bits by the magic bias.
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);

    // s = 2**n: shift n straight into the float exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));

    // Recover integer n, then the reduced argument t = x - n*ln2.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);

    // Horner evaluation of the degree-5 polynomial in t.
    float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
    vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);

    // f = s + (t*s)*p ≈ s * exp(t).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);

    // Force results for inputs below the denormal cutoff to exactly zero.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;

    // Alternate accumulators to break the floating-point add dependency chain.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
  }
  vacc0 = vaddq_f32(vacc0, vacc1);

  float32x4_t vacc = vacc0;
  // Remainder loop: one 4-lane vector at a time (same algorithm as above).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the vector accumulator.
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    // Tail of 1-3 elements: loads a full vector past the end of the input
    // (permitted by XNN_OOB_READS), then stores/accumulates lane-by-lane.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift lane 0 into lane 1 (zeroing lane 0) so the final pairwise
        // add counts this element exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 6,849 | 34.128205 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes exp(input[i] - *max) for the whole batch, writes the results to
// `output`, and stores their scalar sum to `sum` (stable-softmax passes).
//
// Approximation scheme (rr1 + p5): x = n*ln2 + t via the magic-bias trick,
// 2**n is built by shifting n into the float exponent field, and exp(t) is a
// degree-5 polynomial (c1..c5). The "_acc3" variant keeps THREE partial-sum
// accumulators — one per result vector — to break the add dependency chain.
//
// Note: `¶ms`-style mojibake in the original dump is restored to `&params`.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_p5.magic_bias);
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_p5.minus_ln2);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neonfma_rr1_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neonfma_rr1_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neonfma_rr1_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  float32x4_t vacc2 = vmovq_n_f32(0.0f);
  // Main loop: 12 floats (three 4-lane vectors) per iteration.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;

    // Subtract the running maximum so exp() cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);

    // n = round(x / ln2) captured in the low mantissa bits by the magic bias.
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);

    // s = 2**n: shift n straight into the float exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));

    // Recover integer n, then the reduced argument t = x - n*ln2.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);

    // Horner evaluation of the degree-5 polynomial in t.
    float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
    vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);

    // f = s + (t*s)*p ≈ s * exp(t).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);

    // Force results for inputs below the denormal cutoff to exactly zero.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;

    // One accumulator per result vector keeps the three adds independent.
    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc1 = vaddq_f32(vacc1, vf4567);
    vacc2 = vaddq_f32(vacc2, vf89AB);
  }
  vacc0 = vaddq_f32(vacc0, vacc1);
  vacc0 = vaddq_f32(vacc0, vacc2);

  float32x4_t vacc = vacc0;
  // Remainder loop: one 4-lane vector at a time (same algorithm as above).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the vector accumulator.
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    // Tail of 1-3 elements: loads a full vector past the end of the input
    // (permitted by XNN_OOB_READS), then stores/accumulates lane-by-lane.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift lane 0 into lane 1 (zeroing lane 0) so the final pairwise
        // add counts this element exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 6,925 | 34.15736 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes exp(input[i] - *max) for the whole batch, writes the results to
// `output`, and stores their scalar sum to `sum` (stable-softmax passes).
//
// Approximation scheme (rr1 + p5): x = n*ln2 + t via the magic-bias trick,
// 2**n is built by shifting n into the float exponent field, and exp(t) is a
// degree-5 polynomial (c1..c5). Single-accumulator variant, 12 elements per
// main-loop iteration.
//
// Note: `¶ms`-style mojibake in the original dump is restored to `&params`.
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_p5.magic_bias);
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_p5.minus_ln2);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neonfma_rr1_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neonfma_rr1_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neonfma_rr1_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  // Main loop: 12 floats (three 4-lane vectors) per iteration.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;

    // Subtract the running maximum so exp() cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);

    // n = round(x / ln2) captured in the low mantissa bits by the magic bias.
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);

    // s = 2**n: shift n straight into the float exponent field.
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));

    // Recover integer n, then the reduced argument t = x - n*ln2.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);

    // Horner evaluation of the degree-5 polynomial in t.
    float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
    vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);

    // f = s + (t*s)*p ≈ s * exp(t).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);

    // Force results for inputs below the denormal cutoff to exactly zero.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));

    vst1q_f32(output, vf0123); output += 4;
    vst1q_f32(output, vf4567); output += 4;
    vst1q_f32(output, vf89AB); output += 4;

    vacc0 = vaddq_f32(vacc0, vf0123);
    vacc0 = vaddq_f32(vacc0, vf4567);
    vacc0 = vaddq_f32(vacc0, vf89AB);
  }

  float32x4_t vacc = vacc0;
  // Remainder loop: one 4-lane vector at a time (same algorithm as above).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
    vst1q_f32(output, vf); output += 4;
    vacc = vaddq_f32(vacc, vf);
  }
  // Horizontal reduction of the vector accumulator.
  #if XNN_ARCH_ARM64
    float vacc_lo = vaddvq_f32(vacc);
  #else
    float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
  #endif
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    // Tail of 1-3 elements: loads a full vector past the end of the input
    // (permitted by XNN_OOB_READS), then stores/accumulates lane-by-lane.
    const float32x4_t vi = vld1q_f32(input); input += 4;
    const float32x4_t vx = vsubq_f32(vi, vi_max);
    float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
    float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
    vp = vfmaq_f32(vc3, vp, vt);
    vp = vfmaq_f32(vc2, vp, vt);
    vp = vfmaq_f32(vc1, vp, vt);
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vfmaq_f32(vs, vp, vt);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;
      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif
      vf_lo = vget_high_f32(vf);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);
      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
        // Shift lane 0 into lane 1 (zeroing lane 0) so the final pairwise
        // add counts this element exactly once.
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
  #if XNN_ARCH_ARM64
    *sum = vacc_lo;
  #else
    vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
  #endif
}
| 6,768 | 34.072539 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x16_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
}
vacc0 = vaddq_f32(vacc0, vacc1);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 7,728 | 35.804762 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for each of batch/sizeof(float)
// elements and stores the scalar sum of those exponentials to *sum — the two
// ingredients of a numerically-stable softmax denominator pass.
//
// NEON+FMA variant: one-step range reduction (rr1), degree-5 polynomial (p5),
// 16 elements per main-loop iteration with 4 partial-sum accumulators (acc4).
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x16_acc4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float32x4_t vi_max = vld1q_dup_f32(max);
  // NOTE(fix): the checked-in file had "&params" corrupted into "¶ms"
  // (HTML-entity mangling of "&para"); the address-of expressions below
  // restore the intended parameter loads.
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neonfma_rr1_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_p5.magic_bias);
  const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_p5.minus_ln2);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neonfma_rr1_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neonfma_rr1_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neonfma_rr1_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neonfma_rr1_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_p5.denorm_cutoff);

  float32x4_t vacc0 = vmovq_n_f32(0.0f);
  float32x4_t vacc1 = vmovq_n_f32(0.0f);
  float32x4_t vacc2 = vmovq_n_f32(0.0f);
  float32x4_t vacc3 = vmovq_n_f32(0.0f);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vi0123 = vld1q_f32(input); input += 4;
    const float32x4_t vi4567 = vld1q_f32(input); input += 4;
    const float32x4_t vi89AB = vld1q_f32(input); input += 4;
    const float32x4_t viCDEF = vld1q_f32(input); input += 4;

    // x = i - max, so x <= 0 and exp(x) cannot overflow.
    const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
    const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
    const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
    const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);

    // n = round(x * log2(e)) via the magic-bias trick: adding the bias
    // forces the rounded integer into the low mantissa bits of vn.
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
    float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);

    // s = 2**n, built by shifting n's integer bits into the binary32
    // exponent field (23 mantissa bits).
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));

    // Undo the magic bias to recover n as a float.
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);

    // One-step range reduction: t = x - n*ln(2).
    vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
    float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
    float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
    float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);

    // Horner evaluation of the degree-5 polynomial p(t) ~= exp(t) tail.
    float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
    float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
    float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
    float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);

    vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
    vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);

    vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
    vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);

    vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
    vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
    vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);

    // f = s + s*t*p(t) ~= exp(x).
    vt0123 = vmulq_f32(vt0123, vs0123);
    vt4567 = vmulq_f32(vt4567, vs4567);
    vt89AB = vmulq_f32(vt89AB, vs89AB);
    vtCDEF = vmulq_f32(vtCDEF, vsCDEF);

    float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
    float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
    float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
    float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);

    // Flush outputs to zero where x < denorm_cutoff: the shift-based 2**n
    // construction is not valid in that range.
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)))
| 7,880 | 35.827103 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x16(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
}
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 7,647 | 35.769231 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x20-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x20_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t viGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vxGHIJ, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
float32x4_t vtGHIJ = vfmaq_f32(vxGHIJ, vnGHIJ, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
float32x4_t vfGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
vacc0 = vaddq_f32(vacc0, vfGHIJ);
}
vacc0 = vaddq_f32(vacc0, vacc1);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,607 | 37.257778 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x20-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x20_acc5(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
float32x4_t vacc2 = vmovq_n_f32(0.0f);
float32x4_t vacc3 = vmovq_n_f32(0.0f);
float32x4_t vacc4 = vmovq_n_f32(0.0f);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t viGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vxGHIJ, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
float32x4_t vtGHIJ = vfmaq_f32(vxGHIJ, vnGHIJ, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
float32x4_t vfGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc4 = vaddq_f32(vacc4, vf4567);
vacc3 = vaddq_f32(vacc3, vf89AB);
vacc2 = vaddq_f32(vacc2, vfCDEF);
vacc1 = vaddq_f32(vacc1, vfGHIJ);
}
vacc0 = vaddq_f32(vacc0, vacc1);
vacc2 = vaddq_f32(vacc2, vacc3);
vacc0 = vaddq_f32(vacc0, vacc2);
vacc0 = vaddq_f32(vacc0, vacc4);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,835 | 37.251082 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x20(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vi89AB = vld1q_f32(input); input += 4;
const float32x4_t viCDEF = vld1q_f32(input); input += 4;
const float32x4_t viGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
const float32x4_t vx89AB = vsubq_f32(vi89AB, vi_max);
const float32x4_t vxCDEF = vsubq_f32(viCDEF, vi_max);
const float32x4_t vxGHIJ = vsubq_f32(viGHIJ, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vx89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vxCDEF, vlog2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vxGHIJ, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
const float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
const float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
const float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vx89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vxCDEF, vnCDEF, vminus_ln2);
float32x4_t vtGHIJ = vfmaq_f32(vxGHIJ, vnGHIJ, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc4, vc5, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc4, vc5, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc4, vc5, vtGHIJ);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vp89AB = vfmaq_f32(vc1, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc1, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc1, vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
float32x4_t vf89AB = vfmaq_f32(vs89AB, vp89AB, vt89AB);
float32x4_t vfCDEF = vfmaq_f32(vsCDEF, vpCDEF, vtCDEF);
float32x4_t vfGHIJ = vfmaq_f32(vsGHIJ, vpGHIJ, vtGHIJ);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcltq_f32(vx89AB, vdenorm_cutoff)));
vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcltq_f32(vxCDEF, vdenorm_cutoff)));
vfGHIJ = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfGHIJ), vcltq_f32(vxGHIJ, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vst1q_f32(output, vf89AB); output += 4;
vst1q_f32(output, vfCDEF); output += 4;
vst1q_f32(output, vfGHIJ); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
vacc0 = vaddq_f32(vacc0, vf89AB);
vacc0 = vaddq_f32(vacc0, vfCDEF);
vacc0 = vaddq_f32(vacc0, vfGHIJ);
}
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 8,526 | 37.237668 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x4(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
float32x4_t vacc = vmovq_n_f32(0.0f);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 4,013 | 29.876923 | 103 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x8_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
float32x4_t vacc1 = vmovq_n_f32(0.0f);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
}
vacc0 = vaddq_f32(vacc0, vacc1);
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 5,967 | 32.155556 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-neonfma-rr1-p5-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x8(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float32x4_t vi_max = vld1q_dup_f32(max);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p5.log2e);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p5.magic_bias);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.minus_ln2);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c2);
const float32x4_t vc1 = vld1q_dup_f32(¶ms->neonfma_rr1_p5.c1);
const float32x4_t vdenorm_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p5.denorm_cutoff);
float32x4_t vacc0 = vmovq_n_f32(0.0f);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vi0123 = vld1q_f32(input); input += 4;
const float32x4_t vi4567 = vld1q_f32(input); input += 4;
const float32x4_t vx0123 = vsubq_f32(vi0123, vi_max);
const float32x4_t vx4567 = vsubq_f32(vi4567, vi_max);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vx0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vx4567, vlog2e);
const float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
const float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vx0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vx4567, vn4567, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc4, vc5, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vfmaq_f32(vc1, vp0123, vt0123);
vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vt4567 = vmulq_f32(vt4567, vs4567);
float32x4_t vf0123 = vfmaq_f32(vs0123, vp0123, vt0123);
float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);
vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcltq_f32(vx0123, vdenorm_cutoff)));
vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcltq_f32(vx4567, vdenorm_cutoff)));
vst1q_f32(output, vf0123); output += 4;
vst1q_f32(output, vf4567); output += 4;
vacc0 = vaddq_f32(vacc0, vf0123);
vacc0 = vaddq_f32(vacc0, vf4567);
}
float32x4_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
vst1q_f32(output, vf); output += 4;
vacc = vaddq_f32(vacc, vf);
}
#if XNN_ARCH_ARM64
float vacc_lo = vaddvq_f32(vacc);
#else
float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const float32x4_t vi = vld1q_f32(input); input += 4;
const float32x4_t vx = vsubq_f32(vi, vi_max);
float32x4_t vn = vfmaq_f32(vmagic_bias, vx, vlog2e);
const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vx, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc4, vc5, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vfmaq_f32(vc1, vp, vt);
vt = vmulq_f32(vt, vs);
float32x4_t vf = vfmaq_f32(vs, vp, vt);
vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));
float32x2_t vf_lo = vget_low_f32(vf);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vf_lo); output += 2;
#if XNN_ARCH_ARM64
vacc_lo += vaddv_f32(vf_lo);
#else
vacc_lo = vadd_f32(vacc_lo, vf_lo);
#endif
vf_lo = vget_high_f32(vf);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vf_lo, 0);
#if XNN_ARCH_ARM64
vacc_lo += vget_lane_f32(vf_lo, 0);
#else
vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
#endif
}
}
#if XNN_ARCH_ARM64
*sum = vacc_lo;
#else
vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}
| 5,886 | 32.073034 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-lut64-p2-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Note redefine as uint32[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2_k_over_64[64];
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x1(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const float vi_max = *max;
const float vlog2e = params->scalar_rr2_lut64_p2.log2e;
const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
const uint32_t vindex_mask = UINT32_C(0x3F);
const float vminus_ln2_hi = params->scalar_rr2_lut64_p2.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut64_p2.minus_ln2_lo;
const float vc2 = params->scalar_rr2_lut64_p2.c2;
const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;
float vacc = 0.0f;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
// Load 1 input at a time.
const float vi = *input++;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const float vx = vi - vi_max;
// Compute reduced argument n := round(x * 64 / log(2)).
// We do it by adding a large number (magic bias), which cause rounding of the result to an integer, then subtracing
// the large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
// The trick with adding large number is valid only within certain bounds (|x * 64 / log(2)| <= 2**22, i.e.
// |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs outside of [-87.336540, 0.0]
// result in denormalized or underflown expf(x). We fixup the result for such inputs at the very end of the
// algorithm.
float vn = vx * vlog2e + vmagic_bias;
// Create a floating-point number s (scale) such that s := 2**(n / 64) for such inputs that expf(x) is normalized,
// i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split s == 2**(n / 64) = 2**e * 2**(n / 64 - e), where
// e := int(n / 64). We create s in two steps:
// 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
// fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
// 2. Adjust fecthed value by addition of e to its floating-point exponent. The result is always a normalized
// number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
// and thus the adjusted exponent is not lower than -126.
//
// Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
const uint32_t ve = (float_as_uint32(vn) & UINT32_C(0xFFFFFFC0)) << 17;
// Use bits 0:6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
// Adjust exponent of the value l fetched from the table to get the final s value.
const float vs = uint32_as_float(xnn_table_exp2_k_over_64[vidx] + ve);
// Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
vn -= vmagic_bias;
// Compute reduced argument t := x - n * log(2) / 64.
// Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
float vt = vn * vminus_ln2_hi + vx;
vt = vn * vminus_ln2_lo + vt;
// Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
float vp = vt * vc2;
vp = vp * vt + vt;
// Reconstruct the final f value:
// f = s * (1 + t * (1 + t * c2))
// = s * (1 + t + t * (t * c2))
// = s + s * (t + t * (t * c2))
// = s + s * p
float vf = vp * vs + vs;
// For inputs below denormal cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
if XNN_UNPREDICTABLE(vx < vdenorm_cutoff) {
vf = 0.0f;
}
// Store 1 output at a time.
*output++ = vf;
// Accumulate computed exponents.
vacc += vf;
}
*sum = vacc;
}
| 4,761 | 41.900901 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-lut64-p2-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Note redefine as uint32[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2_k_over_64[64];
// Scalar softmax-denominator microkernel: computes expf(input[i] - *max) for
// every element, stores each result to output, and writes the total of all
// results into *sum. The main loop handles 2 elements per iteration and keeps
// 2 partial accumulators to shorten the dependency chain of the reduction.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  const float vmax_in = *max;
  const float vk_log2e = params->scalar_rr2_lut64_p2.log2e;
  const float vk_magic_bias = params->scalar_rr2_lut64_p2.magic_bias;
  const uint32_t vk_index_mask = UINT32_C(0x3F);
  const float vk_neg_ln2_hi = params->scalar_rr2_lut64_p2.minus_ln2_hi;
  const float vk_neg_ln2_lo = params->scalar_rr2_lut64_p2.minus_ln2_lo;
  const float vk_c2 = params->scalar_rr2_lut64_p2.c2;
  const float vk_denorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;
  float vsum0 = 0.0f;
  float vsum1 = 0.0f;
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float va = input[0];
    const float vb = input[1];
    input += 2;
    // x := i - i_max, so x <= 0 and expf(x) cannot overflow.
    const float vxa = va - vmax_in;
    const float vxb = vb - vmax_in;
    // n := round(x * 64 / log(2)) via the magic-bias trick: adding a large
    // constant forces rounding of the product into the low mantissa bits.
    // Valid only for bounded |x|, which holds for all inputs whose expf is
    // not flushed to zero at the end of the algorithm.
    float vna = vxa * vk_log2e + vk_magic_bias;
    float vnb = vxb * vk_log2e + vk_magic_bias;
    // s := 2**(n / 64), assembled from a 64-entry table of 2**(k/64) values
    // (indexed by the 6 low bits of n) plus an exponent adjustment formed by
    // shifting the remaining bits of n into the FP exponent field.
    const uint32_t vea = (float_as_uint32(vna) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t veb = (float_as_uint32(vnb) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t vidxa = float_as_uint32(vna) & vk_index_mask;
    const uint32_t vidxb = float_as_uint32(vnb) & vk_index_mask;
    const float vsa = uint32_as_float(xnn_table_exp2_k_over_64[vidxa] + vea);
    const float vsb = uint32_as_float(xnn_table_exp2_k_over_64[vidxb] + veb);
    // Undo the magic bias to recover n as a floating-point value.
    vna -= vk_magic_bias;
    vnb -= vk_magic_bias;
    // t := x - n * log(2) / 64 via two-constant Cody-Waite reduction.
    float vta = vna * vk_neg_ln2_hi + vxa;
    float vtb = vnb * vk_neg_ln2_hi + vxb;
    vta = vna * vk_neg_ln2_lo + vta;
    vtb = vnb * vk_neg_ln2_lo + vtb;
    // Degree-2 polynomial on the reduced range: p := t + c2*t*t.
    float vpa = vta * vk_c2;
    float vpb = vtb * vk_c2;
    vpa = vpa * vta + vta;
    vpb = vpb * vtb + vtb;
    // f := s + s*p == s * (1 + p) ~= expf(x).
    float vfa = vpa * vsa + vsa;
    float vfb = vpb * vsb + vsb;
    // Flush results for inputs below the denormal cutoff to +0.0f.
    // NaN inputs fail the comparison and pass through unchanged.
    if XNN_UNPREDICTABLE(vxa < vk_denorm_cutoff) {
      vfa = 0.0f;
    }
    if XNN_UNPREDICTABLE(vxb < vk_denorm_cutoff) {
      vfb = 0.0f;
    }
    output[0] = vfa;
    output[1] = vfb;
    output += 2;
    vsum0 += vfa;
    vsum1 += vfb;
  }
  // Fold the partial accumulators together.
  vsum0 += vsum1;
  float vtotal = vsum0;
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Remainder loop: one element at a time, same algorithm as above.
    const float vi = *input++;
    const float vx = vi - vmax_in;
    float vn = vx * vk_log2e + vk_magic_bias;
    const uint32_t ve = (float_as_uint32(vn) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t vidx = float_as_uint32(vn) & vk_index_mask;
    const float vs = uint32_as_float(xnn_table_exp2_k_over_64[vidx] + ve);
    vn -= vk_magic_bias;
    float vt = vn * vk_neg_ln2_hi + vx;
    vt = vn * vk_neg_ln2_lo + vt;
    float vp = vt * vk_c2;
    vp = vp * vt + vt;
    float vf = vp * vs + vs;
    if XNN_UNPREDICTABLE(vx < vk_denorm_cutoff) {
      vf = 0.0f;
    }
    *output++ = vf;
    vtotal += vf;
  }
  *sum = vtotal;
}
| 8,882 | 43.19403 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-lut64-p2-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Note redefine as uint32[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2_k_over_64[64];
// Computes f[i] = expf(input[i] - *max) for `batch` bytes worth of floats,
// stores every f[i] to output, and writes the running total of all f[i]
// into *sum. Main loop: two elements per step, a single accumulator.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  const float vmax_value = *max;
  const float vexp_log2e = params->scalar_rr2_lut64_p2.log2e;
  const float vexp_bias = params->scalar_rr2_lut64_p2.magic_bias;
  const uint32_t vlut_mask = UINT32_C(0x3F);
  const float vexp_nln2_hi = params->scalar_rr2_lut64_p2.minus_ln2_hi;
  const float vexp_nln2_lo = params->scalar_rr2_lut64_p2.minus_ln2_lo;
  const float vexp_c2 = params->scalar_rr2_lut64_p2.c2;
  const float vflush_bound = params->scalar_rr2_lut64_p2.denorm_cutoff;
  float vacc = 0.0f;
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    // x := i - i_max (<= 0), so expf(x) cannot overflow.
    const float vx0 = input[0] - vmax_value;
    const float vx1 = input[1] - vmax_value;
    input += 2;
    // n := round(x * 64 / log(2)), produced by the magic-bias rounding trick:
    // the biased sum holds round(x * 64 / log(2)) in its low mantissa bits.
    float vn0 = vx0 * vexp_log2e + vexp_bias;
    float vn1 = vx1 * vexp_log2e + vexp_bias;
    // Reconstruct s := 2**(n / 64) from a 64-entry table of 2**(k/64)
    // (indexed by the 6 low bits of n) and an exponent adjustment taken
    // from the upper bits of n shifted into the FP exponent position.
    const uint32_t ve0 = (float_as_uint32(vn0) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve1 = (float_as_uint32(vn1) & UINT32_C(0xFFFFFFC0)) << 17;
    const float vs0 = uint32_as_float(xnn_table_exp2_k_over_64[float_as_uint32(vn0) & vlut_mask] + ve0);
    const float vs1 = uint32_as_float(xnn_table_exp2_k_over_64[float_as_uint32(vn1) & vlut_mask] + ve1);
    // Remove the bias to recover n as a float.
    vn0 -= vexp_bias;
    vn1 -= vexp_bias;
    // t := x - n * log(2) / 64, Cody-Waite style with hi/lo constants.
    float vt0 = vn0 * vexp_nln2_hi + vx0;
    float vt1 = vn1 * vexp_nln2_hi + vx1;
    vt0 = vn0 * vexp_nln2_lo + vt0;
    vt1 = vn1 * vexp_nln2_lo + vt1;
    // expf(t) ~= 1 + t + c2*t*t on the reduced range; p := t + c2*t*t.
    float vp0 = vt0 * vexp_c2;
    float vp1 = vt1 * vexp_c2;
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    // f := s + s*p ~= expf(x).
    float vf0 = vp0 * vs0 + vs0;
    float vf1 = vp1 * vs1 + vs1;
    // Inputs below the cutoff would yield denormal/underflowed results:
    // flush them to +0.0f. NaN inputs fail the compare and stay unchanged.
    if XNN_UNPREDICTABLE(vx0 < vflush_bound) {
      vf0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx1 < vflush_bound) {
      vf1 = 0.0f;
    }
    output[0] = vf0;
    output[1] = vf1;
    output += 2;
    vacc += vf0;
    vacc += vf1;
  }
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Scalar tail: identical algorithm, one element per iteration.
    const float vx = *input++ - vmax_value;
    float vn = vx * vexp_log2e + vexp_bias;
    const uint32_t ve = (float_as_uint32(vn) & UINT32_C(0xFFFFFFC0)) << 17;
    const float vs = uint32_as_float(xnn_table_exp2_k_over_64[float_as_uint32(vn) & vlut_mask] + ve);
    vn -= vexp_bias;
    float vt = vn * vexp_nln2_hi + vx;
    vt = vn * vexp_nln2_lo + vt;
    float vp = vt * vexp_c2;
    vp = vp * vt + vt;
    float vf = vp * vs + vs;
    if XNN_UNPREDICTABLE(vx < vflush_bound) {
      vf = 0.0f;
    }
    *output++ = vf;
    vacc += vf;
  }
  *sum = vacc;
}
| 8,799 | 43.444444 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-lut64-p2-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Note redefine as uint32[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2_k_over_64[64];
// Scalar expf(x - max) reduction microkernel: stores each expf result and
// accumulates their sum into *sum. The main loop handles 4 elements per
// iteration with 2 partial accumulators to shorten the floating-point
// dependency chain of the reduction.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x4_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  const float vi_max = *max;
  const float vlog2e = params->scalar_rr2_lut64_p2.log2e;
  const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
  const uint32_t vindex_mask = UINT32_C(0x3F);
  const float vminus_ln2_hi = params->scalar_rr2_lut64_p2.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut64_p2.minus_ln2_lo;
  const float vc2 = params->scalar_rr2_lut64_p2.c2;
  const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;
  float vacc0 = 0.0f;
  float vacc1 = 0.0f;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const float vi0 = input[0];
    const float vi1 = input[1];
    const float vi2 = input[2];
    const float vi3 = input[3];
    input += 4;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx0 = vi0 - vi_max;
    const float vx1 = vi1 - vi_max;
    const float vx2 = vi2 - vi_max;
    const float vx3 = vi3 - vi_max;
    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick with adding a large number is valid only within certain bounds
    // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs
    // outside of [-87.336540, 0.0] result in denormalized or underflowed expf(x). We fix up the result for such
    // inputs at the very end of the algorithm.
    float vn0 = vx0 * vlog2e + vmagic_bias;
    float vn1 = vx1 * vlog2e + vmagic_bias;
    float vn2 = vx2 * vlog2e + vmagic_bias;
    float vn3 = vx3 * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(n / 64) for inputs for which expf(x) is
    // normalized, i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split
    // s == 2**(n / 64) = 2**e * 2**(n / 64 - e), where e := int(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of the floating-point exponent).
    const uint32_t ve0 = (float_as_uint32(vn0) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve1 = (float_as_uint32(vn1) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve2 = (float_as_uint32(vn2) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve3 = (float_as_uint32(vn3) & UINT32_C(0xFFFFFFC0)) << 17;
    // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
    const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
    const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
    const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
    // Adjust the exponent of the value l fetched from the table to get the final s value.
    const float vs0 = uint32_as_float(xnn_table_exp2_k_over_64[vidx0] + ve0);
    const float vs1 = uint32_as_float(xnn_table_exp2_k_over_64[vidx1] + ve1);
    const float vs2 = uint32_as_float(xnn_table_exp2_k_over_64[vidx2] + ve2);
    const float vs3 = uint32_as_float(xnn_table_exp2_k_over_64[vidx3] + ve3);
    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn0 -= vmagic_bias;
    vn1 -= vmagic_bias;
    vn2 -= vmagic_bias;
    vn3 -= vmagic_bias;
    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt0 = vn0 * vminus_ln2_hi + vx0;
    float vt1 = vn1 * vminus_ln2_hi + vx1;
    float vt2 = vn2 * vminus_ln2_hi + vx2;
    float vt3 = vn3 * vminus_ln2_hi + vx3;
    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;
    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp0 = vt0 * vc2;
    float vp1 = vt1 * vc2;
    float vp2 = vt2 * vc2;
    float vp3 = vt3 * vc2;
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float vf0 = vp0 * vs0 + vs0;
    float vf1 = vp1 * vs1 + vs1;
    float vf2 = vp2 * vs2 + vs2;
    float vf3 = vp3 * vs3 + vs3;
    // For inputs below the denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, the comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx0 < vdenorm_cutoff) {
      vf0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {
      vf1 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx2 < vdenorm_cutoff) {
      vf2 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx3 < vdenorm_cutoff) {
      vf3 = 0.0f;
    }
    // Store 4 outputs at a time.
    output[0] = vf0;
    output[1] = vf1;
    output[2] = vf2;
    output[3] = vf3;
    output += 4;
    // Accumulate computed exponents, alternating between the two partial accumulators.
    vacc0 += vf0;
    vacc1 += vf1;
    vacc0 += vf2;
    vacc1 += vf3;
  }
  // Add up all accumulators to vacc0.
  vacc0 += vacc1;
  float vacc = vacc0;
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Remainder loop: process 1 input at a time with the same algorithm as above.
    const float vi = *input++;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx = vi - vi_max;
    // Compute reduced argument n := round(x * 64 / log(2)) via the magic-bias rounding trick (see the main loop).
    float vn = vx * vlog2e + vmagic_bias;
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of the floating-point exponent).
    const uint32_t ve = (float_as_uint32(vn) & UINT32_C(0xFFFFFFC0)) << 17;
    // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
    // Adjust the exponent of the value l fetched from the table to get the final s := 2**(n / 64) value.
    const float vs = uint32_as_float(xnn_table_exp2_k_over_64[vidx] + ve);
    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;
    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp = vt * vc2;
    vp = vp * vt + vt;
    // Reconstruct the final f value: f = s + s * p (see the main loop for the derivation).
    float vf = vp * vs + vs;
    // For inputs below the denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, the comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx < vdenorm_cutoff) {
      vf = 0.0f;
    }
    // Store 1 output at a time.
    *output++ = vf;
    // Accumulate computed exponents.
    vacc += vf;
  }
  *sum = vacc;
}
| 10,150 | 42.195745 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-lut64-p2-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Note redefine as uint32[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2_k_over_64[64];
// Scalar expf(x - max) reduction microkernel: stores each expf result and
// accumulates their sum into *sum. The main loop handles 4 elements per
// iteration with 4 partial accumulators (one per lane) to maximize
// floating-point instruction-level parallelism of the reduction.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x4_acc4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  const float vi_max = *max;
  const float vlog2e = params->scalar_rr2_lut64_p2.log2e;
  const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
  const uint32_t vindex_mask = UINT32_C(0x3F);
  const float vminus_ln2_hi = params->scalar_rr2_lut64_p2.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut64_p2.minus_ln2_lo;
  const float vc2 = params->scalar_rr2_lut64_p2.c2;
  const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;
  float vacc0 = 0.0f;
  float vacc1 = 0.0f;
  float vacc2 = 0.0f;
  float vacc3 = 0.0f;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const float vi0 = input[0];
    const float vi1 = input[1];
    const float vi2 = input[2];
    const float vi3 = input[3];
    input += 4;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx0 = vi0 - vi_max;
    const float vx1 = vi1 - vi_max;
    const float vx2 = vi2 - vi_max;
    const float vx3 = vi3 - vi_max;
    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single
    // FMA instruction. The trick with adding a large number is valid only within certain bounds
    // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs
    // outside of [-87.336540, 0.0] result in denormalized or underflowed expf(x). We fix up the result for such
    // inputs at the very end of the algorithm.
    float vn0 = vx0 * vlog2e + vmagic_bias;
    float vn1 = vx1 * vlog2e + vmagic_bias;
    float vn2 = vx2 * vlog2e + vmagic_bias;
    float vn3 = vx3 * vlog2e + vmagic_bias;
    // Create a floating-point number s (scale) such that s := 2**(n / 64) for inputs for which expf(x) is
    // normalized, i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split
    // s == 2**(n / 64) = 2**e * 2**(n / 64 - e), where e := int(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of the floating-point exponent).
    const uint32_t ve0 = (float_as_uint32(vn0) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve1 = (float_as_uint32(vn1) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve2 = (float_as_uint32(vn2) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve3 = (float_as_uint32(vn3) & UINT32_C(0xFFFFFFC0)) << 17;
    // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
    const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
    const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
    const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
    // Adjust the exponent of the value l fetched from the table to get the final s value.
    const float vs0 = uint32_as_float(xnn_table_exp2_k_over_64[vidx0] + ve0);
    const float vs1 = uint32_as_float(xnn_table_exp2_k_over_64[vidx1] + ve1);
    const float vs2 = uint32_as_float(xnn_table_exp2_k_over_64[vidx2] + ve2);
    const float vs3 = uint32_as_float(xnn_table_exp2_k_over_64[vidx3] + ve3);
    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn0 -= vmagic_bias;
    vn1 -= vmagic_bias;
    vn2 -= vmagic_bias;
    vn3 -= vmagic_bias;
    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt0 = vn0 * vminus_ln2_hi + vx0;
    float vt1 = vn1 * vminus_ln2_hi + vx1;
    float vt2 = vn2 * vminus_ln2_hi + vx2;
    float vt3 = vn3 * vminus_ln2_hi + vx3;
    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;
    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp0 = vt0 * vc2;
    float vp1 = vt1 * vc2;
    float vp2 = vt2 * vc2;
    float vp3 = vt3 * vc2;
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float vf0 = vp0 * vs0 + vs0;
    float vf1 = vp1 * vs1 + vs1;
    float vf2 = vp2 * vs2 + vs2;
    float vf3 = vp3 * vs3 + vs3;
    // For inputs below the denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, the comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx0 < vdenorm_cutoff) {
      vf0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {
      vf1 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx2 < vdenorm_cutoff) {
      vf2 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx3 < vdenorm_cutoff) {
      vf3 = 0.0f;
    }
    // Store 4 outputs at a time.
    output[0] = vf0;
    output[1] = vf1;
    output[2] = vf2;
    output[3] = vf3;
    output += 4;
    // Accumulate computed exponents, one accumulator per lane.
    vacc0 += vf0;
    vacc1 += vf1;
    vacc2 += vf2;
    vacc3 += vf3;
  }
  // Add up all accumulators to vacc0 (pairwise, to keep the chains short).
  vacc0 += vacc1;
  vacc2 += vacc3;
  vacc0 += vacc2;
  float vacc = vacc0;
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Remainder loop: process 1 input at a time with the same algorithm as above.
    const float vi = *input++;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx = vi - vi_max;
    // Compute reduced argument n := round(x * 64 / log(2)) via the magic-bias rounding trick (see the main loop).
    float vn = vx * vlog2e + vmagic_bias;
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of the floating-point exponent).
    const uint32_t ve = (float_as_uint32(vn) & UINT32_C(0xFFFFFFC0)) << 17;
    // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
    // Adjust the exponent of the value l fetched from the table to get the final s := 2**(n / 64) value.
    const float vs = uint32_as_float(xnn_table_exp2_k_over_64[vidx] + ve);
    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn -= vmagic_bias;
    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;
    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp = vt * vc2;
    vp = vp * vt + vt;
    // Reconstruct the final f value: f = s + s * p (see the main loop for the derivation).
    float vf = vp * vs + vs;
    // For inputs below the denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, the comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx < vdenorm_cutoff) {
      vf = 0.0f;
    }
    // Store 1 output at a time.
    *output++ = vf;
    // Accumulate computed exponents.
    vacc += vf;
  }
  *sum = vacc;
}
| 10,230 | 41.807531 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-lut64-p2-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-lut64-p2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Note redefine as uint32[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2_k_over_64[64];
// Computes f[i] := expf(input[i] - *max) for every element, stores the f
// values to `output`, and writes the sum of all f values to `*sum` — the
// building blocks of a numerically-stable softmax.
//
// Scalar variant: 64-entry 2**(k/64) lookup table, degree-2 polynomial, and
// two-constant ("rr2") Cody-Waite range reduction; the main loop processes
// 4 elements per iteration feeding a single accumulator.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float vi_max = *max;
  const float vlog2e = params->scalar_rr2_lut64_p2.log2e;
  const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
  // Low 6 bits of the biased n select one of the 64 table entries.
  const uint32_t vindex_mask = UINT32_C(0x3F);
  const float vminus_ln2_hi = params->scalar_rr2_lut64_p2.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut64_p2.minus_ln2_lo;
  const float vc2 = params->scalar_rr2_lut64_p2.c2;
  const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;

  float vacc0 = 0.0f;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const float vi0 = input[0];
    const float vi1 = input[1];
    const float vi2 = input[2];
    const float vi3 = input[3];
    input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx0 = vi0 - vi_max;
    const float vx1 = vi1 - vi_max;
    const float vx2 = vi2 - vi_max;
    const float vx3 = vi3 - vi_max;

    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding large number is valid only within certain bounds
    // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs
    // outside of [-87.336540, 0.0] result in denormalized or underflown expf(x). We fixup the result for such inputs
    // at the very end of the algorithm.
    float vn0 = vx0 * vlog2e + vmagic_bias;
    float vn1 = vx1 * vlog2e + vmagic_bias;
    float vn2 = vx2 * vlog2e + vmagic_bias;
    float vn3 = vx3 * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(n / 64) for such inputs that expf(x) is normalized,
    // i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split s == 2**(n / 64) = 2**e * 2**(n / 64 - e),
    // where e := int(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust fetched value by addition of e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
    const uint32_t ve0 = (float_as_uint32(vn0) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve1 = (float_as_uint32(vn1) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve2 = (float_as_uint32(vn2) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve3 = (float_as_uint32(vn3) & UINT32_C(0xFFFFFFC0)) << 17;

    // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
    const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
    const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
    const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs0 = uint32_as_float(xnn_table_exp2_k_over_64[vidx0] + ve0);
    const float vs1 = uint32_as_float(xnn_table_exp2_k_over_64[vidx1] + ve1);
    const float vs2 = uint32_as_float(xnn_table_exp2_k_over_64[vidx2] + ve2);
    const float vs3 = uint32_as_float(xnn_table_exp2_k_over_64[vidx3] + ve3);

    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn0 -= vmagic_bias;
    vn1 -= vmagic_bias;
    vn2 -= vmagic_bias;
    vn3 -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt0 = vn0 * vminus_ln2_hi + vx0;
    float vt1 = vn1 * vminus_ln2_hi + vx1;
    float vt2 = vn2 * vminus_ln2_hi + vx2;
    float vt3 = vn3 * vminus_ln2_hi + vx3;
    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp0 = vt0 * vc2;
    float vp1 = vt1 * vc2;
    float vp2 = vt2 * vc2;
    float vp3 = vt3 * vc2;
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float vf0 = vp0 * vs0 + vs0;
    float vf1 = vp1 * vs1 + vs1;
    float vf2 = vp2 * vs2 + vs2;
    float vf3 = vp3 * vs3 + vs3;

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx0 < vdenorm_cutoff) {
      vf0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {
      vf1 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx2 < vdenorm_cutoff) {
      vf2 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx3 < vdenorm_cutoff) {
      vf3 = 0.0f;
    }

    // Store 4 outputs at a time.
    output[0] = vf0;
    output[1] = vf1;
    output[2] = vf2;
    output[3] = vf3;
    output += 4;

    // Accumulate computed exponents into the single accumulator.
    vacc0 += vf0;
    vacc0 += vf1;
    vacc0 += vf2;
    vacc0 += vf3;
  }

  float vacc = vacc0;
  // Scalar remainder loop: same algorithm, one element at a time.
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Load 1 input at a time.
    const float vi = *input++;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx = vi - vi_max;

    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding large number is valid only within certain bounds
    // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs
    // outside of [-87.336540, 0.0] result in denormalized or underflown expf(x). We fixup the result for such inputs
    // at the very end of the algorithm.
    float vn = vx * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(n / 64) for such inputs that expf(x) is normalized,
    // i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split s == 2**(n / 64) = 2**e * 2**(n / 64 - e),
    // where e := int(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust fetched value by addition of e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
    const uint32_t ve = (float_as_uint32(vn) & UINT32_C(0xFFFFFFC0)) << 17;

    // Use the low 6 bits of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(xnn_table_exp2_k_over_64[vidx] + ve);

    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2) / 64.
    // Use Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp = vt * vc2;
    vp = vp * vt + vt;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float vf = vp * vs + vs;

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx < vdenorm_cutoff) {
      vf = 0.0f;
    }

    // Store 1 output at a time.
    *output++ = vf;

    // Accumulate computed exponents.
    vacc += vf;
  }
  *sum = vacc;
}
| 10,067 | 42.396552 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-p5-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] := expf(input[i] - *max) for every element, writes the f
// values to `output`, and stores the total sum of the f values to `*sum`.
//
// Scalar variant: two-constant ("rr2") Cody-Waite range reduction followed by
// a degree-5 polynomial approximation, one element per loop iteration.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x1(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float max_value = *max;

  // Unpack all approximation constants once, outside the loop.
  const float log2e = params->scalar_rr2_p5.log2e;
  const float magic_bias = params->scalar_rr2_p5.magic_bias;
  const float minus_ln2_hi = params->scalar_rr2_p5.minus_ln2_hi;
  const float minus_ln2_lo = params->scalar_rr2_p5.minus_ln2_lo;
  const float c5 = params->scalar_rr2_p5.c5;
  const float c4 = params->scalar_rr2_p5.c4;
  const float c3 = params->scalar_rr2_p5.c3;
  const float c2 = params->scalar_rr2_p5.c2;
  const float c1 = params->scalar_rr2_p5.c1;
  const float denorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;

  float acc = 0.0f;
  while (batch >= sizeof(float)) {
    // x := i - i_max is guaranteed non-positive, so expf(x) <= 1.
    const float x = *input++ - max_value;

    // n := round(x / log(2)), via the magic-bias rounding trick: adding the
    // large bias forces round-to-nearest in the mantissa bits. Valid only for
    // |x| <= 2**22, which is fine: inputs below -87.336540 underflow expf(x)
    // anyway and are fixed up at the end.
    float n = x * log2e + magic_bias;

    // s := 2**n, built directly from the mantissa bits of the biased n.
    // Well-defined while -126 <= n <= 0, i.e. for non-underflowing inputs.
    const float scale = uint32_as_float(float_as_uint32(n) << 23);

    // Remove the bias to recover n := round(x / log(2)) as an ordinary float.
    n -= magic_bias;

    // t := x - n * log(2), with log(2) split into hi/lo parts (Cody-Waite)
    // to reduce rounding error.
    float t = n * minus_ln2_hi + x;
    t = n * minus_ln2_lo + t;

    // Degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
    float poly = c5 * t + c4;
    poly = poly * t + c3;
    poly = poly * t + c2;
    poly = poly * t + c1;

    // Reconstruct f = s + (t * s) * p == s * (1 + t * p) ~= expf(x).
    t *= scale;
    float f = t * poly + scale;

    // Inputs below the denormal cutoff flush to +0.0f; NaN inputs compare
    // false here and pass through unchanged.
    if XNN_UNPREDICTABLE(x < denorm_cutoff) {
      f = 0.0f;
    }

    *output++ = f;
    acc += f;

    batch -= sizeof(float);
  }
  *sum = acc;
}
| 3,554 | 35.27551 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-p5-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] := expf(input[i] - *max) for every element, writes the f
// values to `output`, and stores the sum of all f values to `*sum`.
//
// Scalar variant: two-constant ("rr2") Cody-Waite range reduction with a
// degree-5 polynomial; the main loop is unrolled by 2 and uses 2 independent
// accumulators (acc2) to shorten the floating-point dependency chain.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float vi_max = *max;
  const float vlog2e = params->scalar_rr2_p5.log2e;
  const float vmagic_bias = params->scalar_rr2_p5.magic_bias;
  const float vminus_ln2_hi = params->scalar_rr2_p5.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_p5.minus_ln2_lo;
  const float vc5 = params->scalar_rr2_p5.c5;
  const float vc4 = params->scalar_rr2_p5.c4;
  const float vc3 = params->scalar_rr2_p5.c3;
  const float vc2 = params->scalar_rr2_p5.c2;
  const float vc1 = params->scalar_rr2_p5.c1;
  const float vdenorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;

  float vacc0 = 0.0f;
  float vacc1 = 0.0f;
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    // Load 2 inputs at a time.
    const float vi0 = input[0];
    const float vi1 = input[1];
    input += 2;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx0 = vi0 - vi_max;
    const float vx1 = vi1 - vi_max;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The trick with adding large number is valid only
    // within certain bounds (|x| <= 2**22), but that's ok, because inputs outside of [-87.336540, 0.0] underflow
    // expf(x) anyway. We fixup the result for such inputs at the very end of the algorithm.
    float vn0 = vx0 * vlog2e + vmagic_bias;
    float vn1 = vx1 * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
    const float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);

    // Subtract the large number back to get final n := round(x / log(2)).
    vn0 -= vmagic_bias;
    vn1 -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt0 = vn0 * vminus_ln2_hi + vx0;
    float vt1 = vn1 * vminus_ln2_hi + vx1;
    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    float vp0 = vc5 * vt0 + vc4;
    float vp1 = vc5 * vt1 + vc4;
    vp0 = vp0 * vt0 + vc3;
    vp1 = vp1 * vt1 + vc3;
    vp0 = vp0 * vt0 + vc2;
    vp1 = vp1 * vt1 + vc2;
    vp0 = vp0 * vt0 + vc1;
    vp1 = vp1 * vt1 + vc1;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0 *= vs0;
    vt1 *= vs1;
    float vf0 = vt0 * vp0 + vs0;
    float vf1 = vt1 * vp1 + vs1;

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx0 < vdenorm_cutoff) {
      vf0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {
      vf1 = 0.0f;
    }

    // Store 2 outputs at a time.
    output[0] = vf0;
    output[1] = vf1;
    output += 2;

    // Accumulate computed exponents, one lane per accumulator.
    vacc0 += vf0;
    vacc1 += vf1;
  }
  // Add up all accumulators to vacc0
  vacc0 += vacc1;

  float vacc = vacc0;
  // Scalar remainder loop: same algorithm, one element at a time.
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Load 1 input at a time.
    const float vi = *input++;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx = vi - vi_max;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The trick with adding large number is valid only
    // within certain bounds (|x| <= 2**22), but that's ok, because inputs outside of [-87.336540, 0.0] underflow
    // expf(x) anyway. We fixup the result for such inputs at the very end of the algorithm.
    float vn = vx * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(x / log(2)).
    vn -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    float vp = vc5 * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vc1;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt *= vs;
    float vf = vt * vp + vs;

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx < vdenorm_cutoff) {
      vf = 0.0f;
    }

    // Store 1 output at a time.
    *output++ = vf;

    // Accumulate computed exponents.
    vacc += vf;
  }
  *sum = vacc;
}
| 6,432 | 34.938547 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-p5-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] := expf(input[i] - *max) elementwise, writes the results to
// `output`, and stores the sum of all results to `*sum`.
//
// Scalar variant: two-constant ("rr2") Cody-Waite range reduction with a
// degree-5 polynomial; the main loop is unrolled by 2 and feeds a single
// accumulator.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float max_value = *max;

  // Unpack all approximation constants once, outside the loops.
  const float log2e = params->scalar_rr2_p5.log2e;
  const float magic_bias = params->scalar_rr2_p5.magic_bias;
  const float minus_ln2_hi = params->scalar_rr2_p5.minus_ln2_hi;
  const float minus_ln2_lo = params->scalar_rr2_p5.minus_ln2_lo;
  const float c5 = params->scalar_rr2_p5.c5;
  const float c4 = params->scalar_rr2_p5.c4;
  const float c3 = params->scalar_rr2_p5.c3;
  const float c2 = params->scalar_rr2_p5.c2;
  const float c1 = params->scalar_rr2_p5.c1;
  const float denorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;

  float acc = 0.0f;

  // Main loop: two elements ("a" and "b") per iteration.
  while (batch >= 2 * sizeof(float)) {
    // x := i - i_max is guaranteed non-positive, so expf(x) <= 1.
    const float xa = input[0] - max_value;
    const float xb = input[1] - max_value;
    input += 2;

    // n := round(x / log(2)), via the magic-bias rounding trick: adding the
    // large bias forces round-to-nearest in the mantissa bits. Valid only for
    // |x| <= 2**22, which is fine: inputs below -87.336540 underflow expf(x)
    // anyway and are fixed up at the end.
    float na = xa * log2e + magic_bias;
    float nb = xb * log2e + magic_bias;

    // s := 2**n, built directly from the mantissa bits of the biased n.
    // Well-defined while -126 <= n <= 0, i.e. for non-underflowing inputs.
    const float sa = uint32_as_float(float_as_uint32(na) << 23);
    const float sb = uint32_as_float(float_as_uint32(nb) << 23);

    // Remove the bias to recover n := round(x / log(2)) as an ordinary float.
    na -= magic_bias;
    nb -= magic_bias;

    // t := x - n * log(2), with log(2) split into hi/lo parts (Cody-Waite)
    // to reduce rounding error.
    float ta = na * minus_ln2_hi + xa;
    float tb = nb * minus_ln2_hi + xb;
    ta = na * minus_ln2_lo + ta;
    tb = nb * minus_ln2_lo + tb;

    // Degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
    float pa = c5 * ta + c4;
    float pb = c5 * tb + c4;
    pa = pa * ta + c3;
    pb = pb * tb + c3;
    pa = pa * ta + c2;
    pb = pb * tb + c2;
    pa = pa * ta + c1;
    pb = pb * tb + c1;

    // Reconstruct f = s + (t * s) * p == s * (1 + t * p) ~= expf(x).
    ta *= sa;
    tb *= sb;
    float fa = ta * pa + sa;
    float fb = tb * pb + sb;

    // Inputs below the denormal cutoff flush to +0.0f; NaN inputs compare
    // false here and pass through unchanged.
    if XNN_UNPREDICTABLE(xa < denorm_cutoff) {
      fa = 0.0f;
    }
    if XNN_UNPREDICTABLE(xb < denorm_cutoff) {
      fb = 0.0f;
    }

    output[0] = fa;
    output[1] = fb;
    output += 2;

    // Fold both results into the single accumulator, in lane order.
    acc += fa;
    acc += fb;

    batch -= 2 * sizeof(float);
  }

  // Remainder loop: same algorithm, one element at a time.
  while (batch >= sizeof(float)) {
    const float x = *input++ - max_value;

    float n = x * log2e + magic_bias;
    const float scale = uint32_as_float(float_as_uint32(n) << 23);
    n -= magic_bias;

    float t = n * minus_ln2_hi + x;
    t = n * minus_ln2_lo + t;

    float poly = c5 * t + c4;
    poly = poly * t + c3;
    poly = poly * t + c2;
    poly = poly * t + c1;

    t *= scale;
    float f = t * poly + scale;

    if XNN_UNPREDICTABLE(x < denorm_cutoff) {
      f = 0.0f;
    }

    *output++ = f;
    acc += f;

    batch -= sizeof(float);
  }
  *sum = acc;
}
| 6,349 | 35.079545 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-p5-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes f[i] := expf(input[i] - *max) for every element, writes the f
// values to `output`, and stores the sum of all f values to `*sum`.
//
// Scalar variant: two-constant ("rr2") Cody-Waite range reduction with a
// degree-5 polynomial; the main loop is unrolled by 4 and interleaves 2
// independent accumulators (acc2) to shorten the floating-point dependency
// chain.
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const float vi_max = *max;
  const float vlog2e = params->scalar_rr2_p5.log2e;
  const float vmagic_bias = params->scalar_rr2_p5.magic_bias;
  const float vminus_ln2_hi = params->scalar_rr2_p5.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_p5.minus_ln2_lo;
  const float vc5 = params->scalar_rr2_p5.c5;
  const float vc4 = params->scalar_rr2_p5.c4;
  const float vc3 = params->scalar_rr2_p5.c3;
  const float vc2 = params->scalar_rr2_p5.c2;
  const float vc1 = params->scalar_rr2_p5.c1;
  const float vdenorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;

  float vacc0 = 0.0f;
  float vacc1 = 0.0f;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const float vi0 = input[0];
    const float vi1 = input[1];
    const float vi2 = input[2];
    const float vi3 = input[3];
    input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx0 = vi0 - vi_max;
    const float vx1 = vi1 - vi_max;
    const float vx2 = vi2 - vi_max;
    const float vx3 = vi3 - vi_max;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The trick with adding large number is valid only
    // within certain bounds (|x| <= 2**22), but that's ok, because inputs outside of [-87.336540, 0.0] underflow
    // expf(x) anyway. We fixup the result for such inputs at the very end of the algorithm.
    float vn0 = vx0 * vlog2e + vmagic_bias;
    float vn1 = vx1 * vlog2e + vmagic_bias;
    float vn2 = vx2 * vlog2e + vmagic_bias;
    float vn3 = vx3 * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
    const float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
    const float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
    const float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);

    // Subtract the large number back to get final n := round(x / log(2)).
    vn0 -= vmagic_bias;
    vn1 -= vmagic_bias;
    vn2 -= vmagic_bias;
    vn3 -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt0 = vn0 * vminus_ln2_hi + vx0;
    float vt1 = vn1 * vminus_ln2_hi + vx1;
    float vt2 = vn2 * vminus_ln2_hi + vx2;
    float vt3 = vn3 * vminus_ln2_hi + vx3;
    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    float vp0 = vc5 * vt0 + vc4;
    float vp1 = vc5 * vt1 + vc4;
    float vp2 = vc5 * vt2 + vc4;
    float vp3 = vc5 * vt3 + vc4;
    vp0 = vp0 * vt0 + vc3;
    vp1 = vp1 * vt1 + vc3;
    vp2 = vp2 * vt2 + vc3;
    vp3 = vp3 * vt3 + vc3;
    vp0 = vp0 * vt0 + vc2;
    vp1 = vp1 * vt1 + vc2;
    vp2 = vp2 * vt2 + vc2;
    vp3 = vp3 * vt3 + vc2;
    vp0 = vp0 * vt0 + vc1;
    vp1 = vp1 * vt1 + vc1;
    vp2 = vp2 * vt2 + vc1;
    vp3 = vp3 * vt3 + vc1;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0 *= vs0;
    vt1 *= vs1;
    vt2 *= vs2;
    vt3 *= vs3;
    float vf0 = vt0 * vp0 + vs0;
    float vf1 = vt1 * vp1 + vs1;
    float vf2 = vt2 * vp2 + vs2;
    float vf3 = vt3 * vp3 + vs3;

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx0 < vdenorm_cutoff) {
      vf0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {
      vf1 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx2 < vdenorm_cutoff) {
      vf2 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx3 < vdenorm_cutoff) {
      vf3 = 0.0f;
    }

    // Store 4 outputs at a time.
    output[0] = vf0;
    output[1] = vf1;
    output[2] = vf2;
    output[3] = vf3;
    output += 4;

    // Accumulate computed exponents, alternating between the two
    // accumulators so consecutive additions are independent.
    vacc0 += vf0;
    vacc1 += vf1;
    vacc0 += vf2;
    vacc1 += vf3;
  }
  // Add up all accumulators to vacc0
  vacc0 += vacc1;

  float vacc = vacc0;
  // Scalar remainder loop: same algorithm, one element at a time.
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Load 1 input at a time.
    const float vi = *input++;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx = vi - vi_max;

    // Compute reduced argument n := round(x / log(2)).
    // We do it by adding a large number (magic bias) to the product x * (1/log(2)), which causes rounding of the
    // result to an integer, then subtracting the large number back. The trick with adding large number is valid only
    // within certain bounds (|x| <= 2**22), but that's ok, because inputs outside of [-87.336540, 0.0] underflow
    // expf(x) anyway. We fixup the result for such inputs at the very end of the algorithm.
    float vn = vx * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= n <= 0 accordingly.
    const float vs = uint32_as_float(float_as_uint32(vn) << 23);

    // Subtract the large number back to get final n := round(x / log(2)).
    vn -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    float vp = vc5 * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp = vp * vt + vc1;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt *= vs;
    float vf = vt * vp + vs;

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx < vdenorm_cutoff) {
      vf = 0.0f;
    }

    // Store 1 output at a time.
    *output++ = vf;

    // Accumulate computed exponents.
    vacc += vf;
  }
  *sum = vacc;
}
| 7,548 | 34.111628 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-p5-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4_acc4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Computes output[i] = expf(input[i] - *max) for `batch` bytes of floats and
  // writes the total of all outputs to *sum. The main loop handles 4 elements
  // per iteration and maintains 4 independent partial sums.
  const float max_input = *max;
  const float log2e = params->scalar_rr2_p5.log2e;
  const float magic_bias = params->scalar_rr2_p5.magic_bias;
  const float minus_ln2_hi = params->scalar_rr2_p5.minus_ln2_hi;
  const float minus_ln2_lo = params->scalar_rr2_p5.minus_ln2_lo;
  const float c5 = params->scalar_rr2_p5.c5;
  const float c4 = params->scalar_rr2_p5.c4;
  const float c3 = params->scalar_rr2_p5.c3;
  const float c2 = params->scalar_rr2_p5.c2;
  const float c1 = params->scalar_rr2_p5.c1;
  const float denorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;

  float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // The 4 lanes are fully independent, so iterating over them performs the
    // exact same floating-point operation sequence per element as the
    // hand-unrolled form would.
    for (size_t lane = 0; lane < 4; lane++) {
      // x := input - max; guarantees x <= 0 so expf(x) cannot overflow.
      const float x = input[lane] - max_input;
      // n := round(x / log(2)) via the magic-bias rounding trick. Valid for
      // |x| <= 2**22, which covers [-87.336540, 0.0]; inputs outside that
      // range underflow expf(x) and are fixed up at the end.
      float n = x * log2e + magic_bias;
      // s := 2**n, built by shifting the integer part into the exponent field.
      // Holds for -126 <= n <= 0, i.e. -87.33642 <= x <= 0.0.
      const float s = uint32_as_float(float_as_uint32(n) << 23);
      // Undo the magic bias to recover n := round(x / log(2)).
      n -= magic_bias;
      // t := x - n * log(2), Cody-Waite two-constant range reduction for
      // improved accuracy.
      float t = n * minus_ln2_hi + x;
      t = n * minus_ln2_lo + t;
      // Degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
      float p = c5 * t + c4;
      p = p * t + c3;
      p = p * t + c2;
      p = p * t + c1;
      // Reconstruct: f = s + (t * s) * p.
      float f = (t * s) * p + s;
      // Flush outputs for inputs below the denormal cutoff to +0.0f.
      // NaN inputs compare false and pass through unchanged.
      if XNN_UNPREDICTABLE(x < denorm_cutoff) {
        f = 0.0f;
      }
      output[lane] = f;
      acc[lane] += f;
    }
    input += 4;
    output += 4;
  }
  // Pairwise-combine the 4 partial sums: (a0 + a1) + (a2 + a3).
  acc[0] += acc[1];
  acc[2] += acc[3];
  acc[0] += acc[2];

  float total = acc[0];
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Scalar tail: identical algorithm, one element at a time.
    const float x = *input++ - max_input;
    float n = x * log2e + magic_bias;
    const float s = uint32_as_float(float_as_uint32(n) << 23);
    n -= magic_bias;
    float t = n * minus_ln2_hi + x;
    t = n * minus_ln2_lo + t;
    float p = c5 * t + c4;
    p = p * t + c3;
    p = p * t + c2;
    p = p * t + c1;
    float f = (t * s) * p + s;
    if XNN_UNPREDICTABLE(x < denorm_cutoff) {
      f = 0.0f;
    }
    *output++ = f;
    total += f;
  }
  *sum = total;
}
| 7,628 | 33.835616 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-scalar-rr2-p5-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/scalar-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Computes output[i] = expf(input[i] - *max) for `batch` bytes of floats and
  // writes the total of all outputs to *sum. The main loop handles 4 elements
  // per iteration with a single running accumulator.
  const float input_max = *max;
  const float k_log2e = params->scalar_rr2_p5.log2e;
  const float k_magic_bias = params->scalar_rr2_p5.magic_bias;
  const float k_minus_ln2_hi = params->scalar_rr2_p5.minus_ln2_hi;
  const float k_minus_ln2_lo = params->scalar_rr2_p5.minus_ln2_lo;
  const float k_c5 = params->scalar_rr2_p5.c5;
  const float k_c4 = params->scalar_rr2_p5.c4;
  const float k_c3 = params->scalar_rr2_p5.c3;
  const float k_c2 = params->scalar_rr2_p5.c2;
  const float k_c1 = params->scalar_rr2_p5.c1;
  const float k_denorm_cutoff = params->scalar_rr2_p5.denorm_cutoff;

  float running_sum = 0.0f;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // The 4 lanes are independent; a per-lane loop performs the same
    // floating-point operation sequence per element as unrolled code, and
    // adds f0, f1, f2, f3 to the accumulator in the same order.
    for (size_t lane = 0; lane < 4; lane++) {
      // x := input - max; guarantees x <= 0 so expf(x) cannot overflow.
      const float x = input[lane] - input_max;
      // n := round(x / log(2)) via the magic-bias rounding trick. Valid for
      // |x| <= 2**22, covering [-87.336540, 0.0]; smaller inputs underflow
      // expf(x) and are fixed up below.
      float n = x * k_log2e + k_magic_bias;
      // s := 2**n by shifting the integer part into the exponent field;
      // holds for -126 <= n <= 0, i.e. -87.33642 <= x <= 0.0.
      const float s = uint32_as_float(float_as_uint32(n) << 23);
      // Undo the magic bias to recover n := round(x / log(2)).
      n -= k_magic_bias;
      // t := x - n * log(2), Cody-Waite two-constant range reduction.
      float t = n * k_minus_ln2_hi + x;
      t = n * k_minus_ln2_lo + t;
      // Degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
      float p = k_c5 * t + k_c4;
      p = p * t + k_c3;
      p = p * t + k_c2;
      p = p * t + k_c1;
      // Reconstruct: f = s + (t * s) * p.
      float f = (t * s) * p + s;
      // Flush outputs for inputs below the denormal cutoff to +0.0f.
      // NaN inputs compare false and pass through unchanged.
      if XNN_UNPREDICTABLE(x < k_denorm_cutoff) {
        f = 0.0f;
      }
      output[lane] = f;
      running_sum += f;
    }
    input += 4;
    output += 4;
  }
  for (; batch >= sizeof(float); batch -= sizeof(float)) {
    // Scalar tail: identical algorithm, one element at a time.
    const float x = *input++ - input_max;
    float n = x * k_log2e + k_magic_bias;
    const float s = uint32_as_float(float_as_uint32(n) << 23);
    n -= k_magic_bias;
    float t = n * k_minus_ln2_hi + x;
    t = n * k_minus_ln2_lo + t;
    float p = k_c5 * t + k_c4;
    p = p * t + k_c3;
    p = p * t + k_c2;
    p = p * t + k_c1;
    float f = (t * s) * p + s;
    if XNN_UNPREDICTABLE(x < k_denorm_cutoff) {
      f = 0.0f;
    }
    *output++ = f;
    running_sum += f;
  }
  *sum = running_sum;
}
| 7,465 | 34.216981 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x12-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Computes output[i] = expf(input[i] - *max) for `batch` bytes of floats and
  // stores the total of all outputs to *sum. The main loop processes 12 (3x4)
  // elements per iteration using two independent accumulators (the "_acc2"
  // variant) to shorten the floating-point add dependency chain.
  const __m128 vi_max = _mm_load1_ps(max);
  const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
  const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
  const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
  const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
  const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
  const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
  const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
  const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
  const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);

  __m128 vacc0 = _mm_setzero_ps();
  __m128 vacc1 = _mm_setzero_ps();
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    // Load 12 (3x4) inputs at a time.
    const __m128 vi0123 = _mm_loadu_ps(input);
    const __m128 vi4567 = _mm_loadu_ps(input + 4);
    const __m128 vi89AB = _mm_loadu_ps(input + 8);
    input += 12;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
    const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
    const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);

    // Compute reduced argument batch := round(x / log(2)) using the
    // magic-bias rounding trick; valid for |x| <= 2**22, which covers the
    // non-underflowing range of expf.
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
    __m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
    __m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
    __m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
    vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
    vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);

    // Store 12 (3x4) outputs at a time.
    _mm_storeu_ps(output, vf0123);
    _mm_storeu_ps(output + 4, vf4567);
    _mm_storeu_ps(output + 8, vf89AB);
    output += 12;

    // Accumulate computed exponents, alternating between the two accumulators
    // so the adds form two independent dependency chains. (Previously all
    // three sums went into vacc0, leaving vacc1 permanently zero and
    // defeating the purpose of the _acc2 variant; compare the _acc3 kernel,
    // which distributes across all of its accumulators.)
    vacc0 = _mm_add_ps(vacc0, vf0123);
    vacc1 = _mm_add_ps(vacc1, vf4567);
    vacc0 = _mm_add_ps(vacc0, vf89AB);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm_add_ps(vacc0, vacc1);

  __m128 vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const __m128 vi = _mm_loadu_ps(input);
    input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);

    // Reconstruct the final f value: f = s + (t * s) * p.
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);

    // Store 4 outputs at a time.
    _mm_storeu_ps(output, vf);
    output += 4;

    // Accumulate computed exponents.
    vacc = _mm_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Load 4 inputs at a time (the kernel is marked XNN_OOB_READS; the extra
    // lanes are masked off before use).
    const __m128 vi = _mm_loadu_ps(input);

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);

    // Reconstruct the final f value: f = s + (t * s) * p.
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);

    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      _mm_storel_pi((__m64*) output, vf);
      output += 2;
      // Accumulate 2 computed exponents.
      vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
      vf = _mm_movehl_ps(vf, vf);
    }
    if (batch & (1 * sizeof(float))) {
      // Store 1 output at a time.
      _mm_store_ss(output, vf);
      // Accumulate 1 computed exponent.
      vacc = _mm_add_ss(vacc, vf);
    }
  }
  // Reduce 4 batch in the SIMD register
  vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
  vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
  _mm_store_ss(sum, vacc);
}
| 10,395 | 41.260163 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12_acc3(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m128 vi_max = _mm_load1_ps(max);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
__m128 vacc0 = _mm_setzero_ps();
__m128 vacc1 = _mm_setzero_ps();
__m128 vacc2 = _mm_setzero_ps();
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
// Load 12 (3x4) inputs at a time.
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
const __m128 vi89AB = _mm_loadu_ps(input + 8);
input += 12;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
__m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);
// Store 12 (3x4) outputs at a time.
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
output += 12;
// Accumulate computed exponents.
vacc0 = _mm_add_ps(vacc0, vf0123);
vacc1 = _mm_add_ps(vacc1, vf4567);
vacc2 = _mm_add_ps(vacc2, vf89AB);
}
// Add up all accumulators to vacc0
vacc0 = _mm_add_ps(vacc0, vacc1);
vacc0 = _mm_add_ps(vacc0, vacc2);
__m128 vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
input += 4;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
// Store 4 outputs at a time.
_mm_storeu_ps(output, vf);
output += 4;
// Accumulate computed exponents.
vacc = _mm_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
if (batch & (2 * sizeof(float))) {
// Store 2 outputs at a time.
_mm_storel_pi((__m64*) output, vf);
output += 2;
// Accumulate 2 computed exponents.
vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
vf = _mm_movehl_ps(vf, vf);
}
if (batch & (1 * sizeof(float))) {
// Store 1 output at a time.
_mm_store_ss(output, vf);
// Accumulate 1 computed exponent.
vacc = _mm_add_ss(vacc, vf);
}
}
// Reduce 4 batch in the SIMD register
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
_mm_store_ss(sum, vacc);
}
| 10,466 | 41.205645 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the precomputed input maximum and load the constants of the
  // rr2_p5 scheme: two-constant (Cody-Waite) range reduction plus a degree-5
  // polynomial approximation of exp() on [-log(2)/2, log(2)/2].
  const __m128 vmax = _mm_load1_ps(max);
  const __m128 vk_log2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
  const __m128 vk_magic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
  const __m128 vk_minus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
  const __m128 vk_minus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
  const __m128 vk_c5 = _mm_load_ps(params->sse2_rr2_p5.c5);
  const __m128 vk_c4 = _mm_load_ps(params->sse2_rr2_p5.c4);
  const __m128 vk_c3 = _mm_load_ps(params->sse2_rr2_p5.c3);
  const __m128 vk_c2 = _mm_load_ps(params->sse2_rr2_p5.c2);
  const __m128 vk_c1 = _mm_load_ps(params->sse2_rr2_p5.c1);
  const __m128 vk_denorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);

  // Running vector sum of all computed exponents (single accumulator variant).
  __m128 vsum = _mm_setzero_ps();

  // Main loop: 12 elements (three SSE vectors) per iteration. Each vector is
  // pushed through the complete exp() pipeline independently; only the final
  // accumulation couples them, in the same order as the scalar-equivalent sum.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const __m128 vin0 = _mm_loadu_ps(input);
    const __m128 vin1 = _mm_loadu_ps(input + 4);
    const __m128 vin2 = _mm_loadu_ps(input + 8);
    input += 12;

    // --- vector 0 ---
    // x := i - i_max, so x <= 0 and exp(x) cannot overflow.
    const __m128 vx0 = _mm_sub_ps(vin0, vmax);
    // n := round(x / log(2)) via the magic-bias trick; the rounded integer
    // lands in the low mantissa bits of vn0.
    __m128 vn0 = _mm_add_ps(_mm_mul_ps(vx0, vk_log2e), vk_magic_bias);
    // s := 2**n, built by shifting n into the float exponent field.
    const __m128 vs0 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0), 23));
    vn0 = _mm_sub_ps(vn0, vk_magic_bias);
    // t := x - n * log(2), with log(2) split hi/lo for extra accuracy.
    __m128 vt0 = _mm_add_ps(_mm_mul_ps(vn0, vk_minus_ln2_hi), vx0);
    vt0 = _mm_add_ps(_mm_mul_ps(vn0, vk_minus_ln2_lo), vt0);
    // Degree-5 polynomial p(t) evaluated with Horner's rule.
    __m128 vp0 = _mm_add_ps(_mm_mul_ps(vk_c5, vt0), vk_c4);
    vp0 = _mm_add_ps(_mm_mul_ps(vp0, vt0), vk_c3);
    vp0 = _mm_add_ps(_mm_mul_ps(vp0, vt0), vk_c2);
    vp0 = _mm_add_ps(_mm_mul_ps(vp0, vt0), vk_c1);
    // f := s + (t * s) * p, the reconstructed exp(x).
    vt0 = _mm_mul_ps(vt0, vs0);
    __m128 vf0 = _mm_add_ps(_mm_mul_ps(vt0, vp0), vs0);
    // Flush outputs for x below the denormal cutoff to +0.0f (NaN compares
    // false, so NaN lanes pass through unchanged).
    vf0 = _mm_andnot_ps(_mm_cmplt_ps(vx0, vk_denorm_cutoff), vf0);

    // --- vector 1 ---
    const __m128 vx1 = _mm_sub_ps(vin1, vmax);
    __m128 vn1 = _mm_add_ps(_mm_mul_ps(vx1, vk_log2e), vk_magic_bias);
    const __m128 vs1 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn1), 23));
    vn1 = _mm_sub_ps(vn1, vk_magic_bias);
    __m128 vt1 = _mm_add_ps(_mm_mul_ps(vn1, vk_minus_ln2_hi), vx1);
    vt1 = _mm_add_ps(_mm_mul_ps(vn1, vk_minus_ln2_lo), vt1);
    __m128 vp1 = _mm_add_ps(_mm_mul_ps(vk_c5, vt1), vk_c4);
    vp1 = _mm_add_ps(_mm_mul_ps(vp1, vt1), vk_c3);
    vp1 = _mm_add_ps(_mm_mul_ps(vp1, vt1), vk_c2);
    vp1 = _mm_add_ps(_mm_mul_ps(vp1, vt1), vk_c1);
    vt1 = _mm_mul_ps(vt1, vs1);
    __m128 vf1 = _mm_add_ps(_mm_mul_ps(vt1, vp1), vs1);
    vf1 = _mm_andnot_ps(_mm_cmplt_ps(vx1, vk_denorm_cutoff), vf1);

    // --- vector 2 ---
    const __m128 vx2 = _mm_sub_ps(vin2, vmax);
    __m128 vn2 = _mm_add_ps(_mm_mul_ps(vx2, vk_log2e), vk_magic_bias);
    const __m128 vs2 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn2), 23));
    vn2 = _mm_sub_ps(vn2, vk_magic_bias);
    __m128 vt2 = _mm_add_ps(_mm_mul_ps(vn2, vk_minus_ln2_hi), vx2);
    vt2 = _mm_add_ps(_mm_mul_ps(vn2, vk_minus_ln2_lo), vt2);
    __m128 vp2 = _mm_add_ps(_mm_mul_ps(vk_c5, vt2), vk_c4);
    vp2 = _mm_add_ps(_mm_mul_ps(vp2, vt2), vk_c3);
    vp2 = _mm_add_ps(_mm_mul_ps(vp2, vt2), vk_c2);
    vp2 = _mm_add_ps(_mm_mul_ps(vp2, vt2), vk_c1);
    vt2 = _mm_mul_ps(vt2, vs2);
    __m128 vf2 = _mm_add_ps(_mm_mul_ps(vt2, vp2), vs2);
    vf2 = _mm_andnot_ps(_mm_cmplt_ps(vx2, vk_denorm_cutoff), vf2);

    // Store all 12 outputs and fold them into the running sum.
    _mm_storeu_ps(output, vf0);
    _mm_storeu_ps(output + 4, vf1);
    _mm_storeu_ps(output + 8, vf2);
    output += 12;

    vsum = _mm_add_ps(vsum, vf0);
    vsum = _mm_add_ps(vsum, vf1);
    vsum = _mm_add_ps(vsum, vf2);
  }

  // Remainder loop: one SSE vector (4 elements) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vin = _mm_loadu_ps(input);
    input += 4;

    const __m128 vx = _mm_sub_ps(vin, vmax);
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vk_log2e), vk_magic_bias);
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    vn = _mm_sub_ps(vn, vk_magic_bias);
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vk_minus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vk_minus_ln2_lo), vt);
    __m128 vp = _mm_add_ps(_mm_mul_ps(vk_c5, vt), vk_c4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vk_c3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vk_c2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vk_c1);
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vk_denorm_cutoff), vf);

    _mm_storeu_ps(output, vf);
    output += 4;
    vsum = _mm_add_ps(vsum, vf);
  }

  // Tail: 1-3 leftover elements. A full vector is loaded (the kernel is
  // marked XNN_OOB_READS), but only the valid lanes are stored/accumulated.
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    const __m128 vin = _mm_loadu_ps(input);

    const __m128 vx = _mm_sub_ps(vin, vmax);
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vk_log2e), vk_magic_bias);
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    vn = _mm_sub_ps(vn, vk_magic_bias);
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vk_minus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vk_minus_ln2_lo), vt);
    __m128 vp = _mm_add_ps(_mm_mul_ps(vk_c5, vt), vk_c4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vk_c3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vk_c2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vk_c1);
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vk_denorm_cutoff), vf);

    if (batch & (2 * sizeof(float))) {
      // Store the low two lanes, accumulate them (with high lanes zeroed),
      // then shift the high pair down for the possible single-lane tail.
      _mm_storel_pi((__m64*) output, vf);
      output += 2;
      vsum = _mm_add_ps(vsum, _mm_movelh_ps(vf, _mm_setzero_ps()));
      vf = _mm_movehl_ps(vf, vf);
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vf);
      vsum = _mm_add_ss(vsum, vf);
    }
  }

  // Horizontal reduction of the four partial sums to one scalar in lane 0.
  vsum = _mm_add_ps(vsum, _mm_movehl_ps(vsum, vsum));
  vsum = _mm_add_ss(vsum, _mm_shuffle_ps(vsum, vsum, _MM_SHUFFLE(2, 3, 0, 1)));
  _mm_store_ss(sum, vsum);
}
| 10,281 | 41.312757 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for the whole batch, stores every
// result, and writes the sum of all results to *sum (softmax numerators and
// denominator). Processes 16 floats (4 SSE vectors) per main-loop iteration.
//
// This is the acc2 variant: two partial-sum accumulators are used so the
// exponent additions form two independent dependency chains. Fix: the main
// loop previously folded all four result vectors into vacc0 only, leaving
// vacc1 permanently zero and defeating the purpose of the second accumulator;
// accumulation is now round-robin across vacc0/vacc1.
//
// batch  - number of input bytes; non-zero multiple of sizeof(float)
// input  - input vector
// max    - pointer to the precomputed maximum of the input
// output - receives exp(input[i] - *max)
// sum    - receives the sum of all outputs
// params - sse2_rr2_p5 coefficient set (range-reduction and polynomial)
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x16_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const __m128 vi_max = _mm_load1_ps(max);
  const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
  const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
  const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
  const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
  const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
  const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
  const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
  const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
  const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);

  __m128 vacc0 = _mm_setzero_ps();
  __m128 vacc1 = _mm_setzero_ps();
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const __m128 vi0123 = _mm_loadu_ps(input);
    const __m128 vi4567 = _mm_loadu_ps(input + 4);
    const __m128 vi89AB = _mm_loadu_ps(input + 8);
    const __m128 viCDEF = _mm_loadu_ps(input + 12);
    input += 16;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
    const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
    const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);
    const __m128 vxCDEF = _mm_sub_ps(viCDEF, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);
    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vxCDEF, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
    const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vxCDEF);

    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);

    __m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
    __m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
    __m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
    __m128 vfCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
    vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
    vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);
    vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vxCDEF, vdenorm_cutoff), vfCDEF);

    // Store 16 (4x4) outputs at a time.
    _mm_storeu_ps(output, vf0123);
    _mm_storeu_ps(output + 4, vf4567);
    _mm_storeu_ps(output + 8, vf89AB);
    _mm_storeu_ps(output + 12, vfCDEF);
    output += 16;

    // Accumulate computed exponents, round-robin across both accumulators so
    // the additions form two independent dependency chains (the point of the
    // acc2 variant).
    vacc0 = _mm_add_ps(vacc0, vf0123);
    vacc1 = _mm_add_ps(vacc1, vf4567);
    vacc0 = _mm_add_ps(vacc0, vf89AB);
    vacc1 = _mm_add_ps(vacc1, vfCDEF);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm_add_ps(vacc0, vacc1);

  __m128 vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const __m128 vi = _mm_loadu_ps(input);
    input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);

    // Reconstruct the final f value:
    //   f = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);

    // Store 4 outputs at a time.
    _mm_storeu_ps(output, vf);
    output += 4;

    // Accumulate computed exponents.
    vacc = _mm_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Load 4 inputs at a time; the out-of-bounds lanes are tolerated because
    // the kernel is declared XNN_OOB_READS, and they are never stored.
    const __m128 vi = _mm_loadu_ps(input);

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);

    // Reconstruct the final f value:
    //   f = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);

    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      _mm_storel_pi((__m64*) output, vf);
      output += 2;

      // Accumulate 2 computed exponents.
      vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));

      vf = _mm_movehl_ps(vf, vf);
    }
    if (batch & (1 * sizeof(float))) {
      // Store 1 output at a time.
      _mm_store_ss(output, vf);

      // Accumulate 1 computed exponent.
      vacc = _mm_add_ss(vacc, vf);
    }
  }
  // Reduce 4 batch in the SIMD register
  vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
  vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
  _mm_store_ss(sum, vacc);
}
| 11,351 | 42.328244 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for the whole batch, stores every
// result, and writes the sum of all results to *sum (softmax numerators and
// denominator). Processes 16 floats (4 SSE vectors) per main-loop iteration.
//
// This is the acc4 variant: four partial-sum accumulators are used so the
// exponent additions form four independent dependency chains. Fix: the main
// loop previously folded all four result vectors into vacc0 only, leaving
// vacc1..vacc3 permanently zero and defeating the purpose of the extra
// accumulators; accumulation is now one result vector per accumulator.
//
// batch  - number of input bytes; non-zero multiple of sizeof(float)
// input  - input vector
// max    - pointer to the precomputed maximum of the input
// output - receives exp(input[i] - *max)
// sum    - receives the sum of all outputs
// params - sse2_rr2_p5 coefficient set (range-reduction and polynomial)
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x16_acc4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const __m128 vi_max = _mm_load1_ps(max);
  const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
  const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
  const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
  const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
  const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
  const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
  const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
  const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
  const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);

  __m128 vacc0 = _mm_setzero_ps();
  __m128 vacc1 = _mm_setzero_ps();
  __m128 vacc2 = _mm_setzero_ps();
  __m128 vacc3 = _mm_setzero_ps();
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const __m128 vi0123 = _mm_loadu_ps(input);
    const __m128 vi4567 = _mm_loadu_ps(input + 4);
    const __m128 vi89AB = _mm_loadu_ps(input + 8);
    const __m128 viCDEF = _mm_loadu_ps(input + 12);
    input += 16;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
    const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
    const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);
    const __m128 vxCDEF = _mm_sub_ps(viCDEF, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);
    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vxCDEF, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
    const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vxCDEF);

    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);

    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);

    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);

    __m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
    __m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
    __m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
    __m128 vfCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
    vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
    vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);
    vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vxCDEF, vdenorm_cutoff), vfCDEF);

    // Store 16 (4x4) outputs at a time.
    _mm_storeu_ps(output, vf0123);
    _mm_storeu_ps(output + 4, vf4567);
    _mm_storeu_ps(output + 8, vf89AB);
    _mm_storeu_ps(output + 12, vfCDEF);
    output += 16;

    // Accumulate computed exponents, one result vector per accumulator so the
    // additions form four independent dependency chains (the point of the
    // acc4 variant).
    vacc0 = _mm_add_ps(vacc0, vf0123);
    vacc1 = _mm_add_ps(vacc1, vf4567);
    vacc2 = _mm_add_ps(vacc2, vf89AB);
    vacc3 = _mm_add_ps(vacc3, vfCDEF);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm_add_ps(vacc0, vacc1);
  vacc2 = _mm_add_ps(vacc2, vacc3);
  vacc0 = _mm_add_ps(vacc0, vacc2);

  __m128 vacc = vacc0;
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const __m128 vi = _mm_loadu_ps(input);
    input += 4;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);

    // Reconstruct the final f value:
    //   f = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);

    // Store 4 outputs at a time.
    _mm_storeu_ps(output, vf);
    output += 4;

    // Accumulate computed exponents.
    vacc = _mm_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Load 4 inputs at a time; the out-of-bounds lanes are tolerated because
    // the kernel is declared XNN_OOB_READS, and they are never stored.
    const __m128 vi = _mm_loadu_ps(input);

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);

    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);

    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));

    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);

    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);

    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);

    // Reconstruct the final f value:
    //   f = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);

    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);

    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      _mm_storel_pi((__m64*) output, vf);
      output += 2;

      // Accumulate 2 computed exponents.
      vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));

      vf = _mm_movehl_ps(vf, vf);
    }
    if (batch & (1 * sizeof(float))) {
      // Store 1 output at a time.
      _mm_store_ss(output, vf);

      // Accumulate 1 computed exponent.
      vacc = _mm_add_ss(vacc, vf);
    }
  }
  // Reduce 4 batch in the SIMD register
  vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
  vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
  _mm_store_ss(sum, vacc);
}
| 11,493 | 42.210526 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every input element and
// accumulates the sum of all computed exponentials into *sum — the fused
// "subtract max, exponentiate, store, reduce" step of a numerically stable
// softmax. SSE2 variant: degree-5 polynomial approximation of exp on
// [-log(2)/2, log(2)/2], Cody-Waite two-constant range reduction, 16 elements
// (4 SIMD vectors) per main-loop iteration, single SIMD accumulator.
//
// NOTE(review): the remainder path loads a full 4-float vector even when only
// 1-3 elements remain; the XNN_OOB_READS annotation marks this out-of-bounds
// read as intentional — presumably callers guarantee the read is benign.
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x16(
    size_t batch,        // byte count of the input; non-zero multiple of sizeof(float)
    const float* input,
    const float* max,    // pointer to the precomputed maximum of the input
    float* output,
    float* sum,          // receives the scalar sum of all exp(input[i] - *max)
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the maximum and load the precomputed constants (each stored as a
  // 4-lane replicated vector in params).
  const __m128 vi_max = _mm_load1_ps(max);
  const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
  const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
  const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
  const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
  const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
  const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
  const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
  const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
  const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
  __m128 vacc0 = _mm_setzero_ps();
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const __m128 vi0123 = _mm_loadu_ps(input);
    const __m128 vi4567 = _mm_loadu_ps(input + 4);
    const __m128 vi89AB = _mm_loadu_ps(input + 8);
    const __m128 viCDEF = _mm_loadu_ps(input + 12);
    input += 16;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
    const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
    const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);
    const __m128 vxCDEF = _mm_sub_ps(viCDEF, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    // Adding the "magic bias" pushes x*log2(e) into a binade where the
    // rounded integer sits in the low mantissa bits ("round via bias" trick).
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);
    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vxCDEF, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    // Shifting the biased integer left by 23 places it in the exponent field.
    const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
    const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vxCDEF);
    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme.
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
    __m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
    __m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
    __m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
    __m128 vfCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
    vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
    vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);
    vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vxCDEF, vdenorm_cutoff), vfCDEF);
    // Store 16 (4x4) outputs at a time.
    _mm_storeu_ps(output, vf0123);
    _mm_storeu_ps(output + 4, vf4567);
    _mm_storeu_ps(output + 8, vf89AB);
    _mm_storeu_ps(output + 12, vfCDEF);
    output += 16;
    // Accumulate computed exponents (single-accumulator variant: all four
    // vectors fold into vacc0).
    vacc0 = _mm_add_ps(vacc0, vf0123);
    vacc0 = _mm_add_ps(vacc0, vf4567);
    vacc0 = _mm_add_ps(vacc0, vf89AB);
    vacc0 = _mm_add_ps(vacc0, vfCDEF);
  }
  __m128 vacc = vacc0;
  // Scalar-vector (4-wide) tail loop for remaining full vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const __m128 vi = _mm_loadu_ps(input);
    input += 4;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
    // Store 4 outputs at a time.
    _mm_storeu_ps(output, vf);
    output += 4;
    // Accumulate computed exponents.
    vacc = _mm_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Load 4 inputs at a time (over-reads past the end — see XNN_OOB_READS).
    const __m128 vi = _mm_loadu_ps(input);
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      _mm_storel_pi((__m64*) output, vf);
      output += 2;
      // Accumulate 2 computed exponents (upper two lanes zeroed first).
      vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
      vf = _mm_movehl_ps(vf, vf);
    }
    if (batch & (1 * sizeof(float))) {
      // Store 1 output at a time.
      _mm_store_ss(output, vf);
      // Accumulate 1 computed exponent.
      vacc = _mm_add_ss(vacc, vf);
    }
  }
  // Reduce the 4 lanes of the SIMD accumulator to a scalar and store it.
  vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
  vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
  _mm_store_ss(sum, vacc);
}
| 11,237 | 42.389961 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x20-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every input element and
// accumulates the sum of all computed exponentials into *sum — the fused
// "subtract max, exponentiate, store, reduce" step of a numerically stable
// softmax. SSE2 variant: degree-5 polynomial approximation, Cody-Waite
// two-constant range reduction, 20 elements (5 SIMD vectors) per main-loop
// iteration with TWO partial accumulators (_acc2).
//
// Fix: the main loop previously added all five result vectors into vacc0,
// leaving vacc1 permanently zero. The additions now alternate between vacc0
// and vacc1, which breaks the serial add dependency chain — the entire
// purpose of the _acc2 variant. (The final sum is mathematically unchanged;
// only the floating-point summation order differs.)
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x20_acc2(
    size_t batch,        // byte count of the input; non-zero multiple of sizeof(float)
    const float* input,
    const float* max,    // pointer to the precomputed maximum of the input
    float* output,
    float* sum,          // receives the scalar sum of all exp(input[i] - *max)
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the maximum and load the precomputed constants.
  const __m128 vi_max = _mm_load1_ps(max);
  const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
  const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
  const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
  const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
  const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
  const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
  const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
  const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
  const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
  __m128 vacc0 = _mm_setzero_ps();
  __m128 vacc1 = _mm_setzero_ps();
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    // Load 20 (5x4) inputs at a time.
    const __m128 vi0123 = _mm_loadu_ps(input);
    const __m128 vi4567 = _mm_loadu_ps(input + 4);
    const __m128 vi89AB = _mm_loadu_ps(input + 8);
    const __m128 viCDEF = _mm_loadu_ps(input + 12);
    const __m128 viGHIJ = _mm_loadu_ps(input + 16);
    input += 20;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
    const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
    const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);
    const __m128 vxCDEF = _mm_sub_ps(viCDEF, vi_max);
    const __m128 vxGHIJ = _mm_sub_ps(viGHIJ, vi_max);
    // Compute reduced argument batch := round(x / log(2)) via the magic-bias
    // round-to-nearest trick.
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
    __m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);
    __m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vxCDEF, vlog2e), vmagic_bias);
    __m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vxGHIJ, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
    const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
    const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
    vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
    vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
    __m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
    __m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vxCDEF);
    __m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vxGHIJ);
    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
    vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2]
    // via Horner's scheme.
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
    __m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
    __m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
    __m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc5, vtGHIJ), vc4);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
    vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
    vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
    vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    vt89AB = _mm_mul_ps(vt89AB, vs89AB);
    vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
    vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
    __m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
    __m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
    __m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
    __m128 vfCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
    __m128 vfGHIJ = _mm_add_ps(_mm_mul_ps(vtGHIJ, vpGHIJ), vsGHIJ);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
    vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
    vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);
    vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vxCDEF, vdenorm_cutoff), vfCDEF);
    vfGHIJ = _mm_andnot_ps(_mm_cmplt_ps(vxGHIJ, vdenorm_cutoff), vfGHIJ);
    // Store 20 (5x4) outputs at a time.
    _mm_storeu_ps(output, vf0123);
    _mm_storeu_ps(output + 4, vf4567);
    _mm_storeu_ps(output + 8, vf89AB);
    _mm_storeu_ps(output + 12, vfCDEF);
    _mm_storeu_ps(output + 16, vfGHIJ);
    output += 20;
    // Accumulate computed exponents, alternating between the two partial
    // accumulators so consecutive additions are independent (the point of
    // the _acc2 variant).
    vacc0 = _mm_add_ps(vacc0, vf0123);
    vacc1 = _mm_add_ps(vacc1, vf4567);
    vacc0 = _mm_add_ps(vacc0, vf89AB);
    vacc1 = _mm_add_ps(vacc1, vfCDEF);
    vacc0 = _mm_add_ps(vacc0, vfGHIJ);
  }
  // Add up all accumulators to vacc0
  vacc0 = _mm_add_ps(vacc0, vacc1);
  __m128 vacc = vacc0;
  // Scalar-vector (4-wide) tail loop for remaining full vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const __m128 vi = _mm_loadu_ps(input);
    input += 4;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
    // Store 4 outputs at a time.
    _mm_storeu_ps(output, vf);
    output += 4;
    // Accumulate computed exponents.
    vacc = _mm_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Load 4 inputs at a time (over-reads past the end — see XNN_OOB_READS).
    const __m128 vi = _mm_loadu_ps(input);
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      _mm_storel_pi((__m64*) output, vf);
      output += 2;
      // Accumulate 2 computed exponents (upper two lanes zeroed first).
      vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
      vf = _mm_movehl_ps(vf, vf);
    }
    if (batch & (1 * sizeof(float))) {
      // Store 1 output at a time.
      _mm_store_ss(output, vf);
      // Accumulate 1 computed exponent.
      vacc = _mm_add_ss(vacc, vf);
    }
  }
  // Reduce the 4 lanes of the SIMD accumulator to a scalar and store it.
  vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
  vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
  _mm_store_ss(sum, vacc);
}
| 12,307 | 43.273381 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x20-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x20_acc5(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m128 vi_max = _mm_load1_ps(max);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
__m128 vacc0 = _mm_setzero_ps();
__m128 vacc1 = _mm_setzero_ps();
__m128 vacc2 = _mm_setzero_ps();
__m128 vacc3 = _mm_setzero_ps();
__m128 vacc4 = _mm_setzero_ps();
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
// Load 20 (5x4) inputs at a time.
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
const __m128 vi89AB = _mm_loadu_ps(input + 8);
const __m128 viCDEF = _mm_loadu_ps(input + 12);
const __m128 viGHIJ = _mm_loadu_ps(input + 16);
input += 20;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);
const __m128 vxCDEF = _mm_sub_ps(viCDEF, vi_max);
const __m128 vxGHIJ = _mm_sub_ps(viGHIJ, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vxCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vxGHIJ, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vxCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vxGHIJ);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc5, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
__m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 vfCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
__m128 vfGHIJ = _mm_add_ps(_mm_mul_ps(vtGHIJ, vpGHIJ), vsGHIJ);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);
vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vxCDEF, vdenorm_cutoff), vfCDEF);
vfGHIJ = _mm_andnot_ps(_mm_cmplt_ps(vxGHIJ, vdenorm_cutoff), vfGHIJ);
// Store 20 (5x4) outputs at a time.
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
_mm_storeu_ps(output + 12, vfCDEF);
_mm_storeu_ps(output + 16, vfGHIJ);
output += 20;
// Accumulate computed exponents.
vacc0 = _mm_add_ps(vacc0, vf0123);
vacc4 = _mm_add_ps(vacc4, vf4567);
vacc3 = _mm_add_ps(vacc3, vf89AB);
vacc2 = _mm_add_ps(vacc2, vfCDEF);
vacc1 = _mm_add_ps(vacc1, vfGHIJ);
}
// Add up all accumulators to vacc0
vacc0 = _mm_add_ps(vacc0, vacc1);
vacc2 = _mm_add_ps(vacc2, vacc3);
vacc0 = _mm_add_ps(vacc0, vacc2);
vacc0 = _mm_add_ps(vacc0, vacc4);
__m128 vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
input += 4;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
// Store 4 outputs at a time.
_mm_storeu_ps(output, vf);
output += 4;
// Accumulate computed exponents.
vacc = _mm_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
if (batch & (2 * sizeof(float))) {
// Store 2 outputs at a time.
_mm_storel_pi((__m64*) output, vf);
output += 2;
// Accumulate 2 computed exponents.
vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
vf = _mm_movehl_ps(vf, vf);
}
if (batch & (1 * sizeof(float))) {
// Store 1 output at a time.
_mm_store_ss(output, vf);
// Accumulate 1 computed exponent.
vacc = _mm_add_ss(vacc, vf);
}
}
// Reduce 4 batch in the SIMD register
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
_mm_store_ss(sum, vacc);
}
| 12,520 | 43.088028 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x20(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m128 vi_max = _mm_load1_ps(max);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
__m128 vacc0 = _mm_setzero_ps();
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
// Load 20 (5x4) inputs at a time.
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
const __m128 vi89AB = _mm_loadu_ps(input + 8);
const __m128 viCDEF = _mm_loadu_ps(input + 12);
const __m128 viGHIJ = _mm_loadu_ps(input + 16);
input += 20;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
const __m128 vx89AB = _mm_sub_ps(vi89AB, vi_max);
const __m128 vxCDEF = _mm_sub_ps(viCDEF, vi_max);
const __m128 vxGHIJ = _mm_sub_ps(viGHIJ, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vx89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vxCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vxGHIJ, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
const __m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
const __m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
const __m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vx89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vxCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vxGHIJ);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc5, vt89AB), vc4);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc5, vtCDEF), vc4);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc5, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc1);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc1);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
__m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
__m128 vf89AB = _mm_add_ps(_mm_mul_ps(vt89AB, vp89AB), vs89AB);
__m128 vfCDEF = _mm_add_ps(_mm_mul_ps(vtCDEF, vpCDEF), vsCDEF);
__m128 vfGHIJ = _mm_add_ps(_mm_mul_ps(vtGHIJ, vpGHIJ), vsGHIJ);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
vf89AB = _mm_andnot_ps(_mm_cmplt_ps(vx89AB, vdenorm_cutoff), vf89AB);
vfCDEF = _mm_andnot_ps(_mm_cmplt_ps(vxCDEF, vdenorm_cutoff), vfCDEF);
vfGHIJ = _mm_andnot_ps(_mm_cmplt_ps(vxGHIJ, vdenorm_cutoff), vfGHIJ);
// Store 20 (5x4) outputs at a time.
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
_mm_storeu_ps(output + 8, vf89AB);
_mm_storeu_ps(output + 12, vfCDEF);
_mm_storeu_ps(output + 16, vfGHIJ);
output += 20;
// Accumulate computed exponents.
vacc0 = _mm_add_ps(vacc0, vf0123);
vacc0 = _mm_add_ps(vacc0, vf4567);
vacc0 = _mm_add_ps(vacc0, vf89AB);
vacc0 = _mm_add_ps(vacc0, vfCDEF);
vacc0 = _mm_add_ps(vacc0, vfGHIJ);
}
__m128 vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
input += 4;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
// Store 4 outputs at a time.
_mm_storeu_ps(output, vf);
output += 4;
// Accumulate computed exponents.
vacc = _mm_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
if (batch & (2 * sizeof(float))) {
// Store 2 outputs at a time.
_mm_storel_pi((__m64*) output, vf);
output += 2;
// Accumulate 2 computed exponents.
vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
vf = _mm_movehl_ps(vf, vf);
}
if (batch & (1 * sizeof(float))) {
// Store 1 output at a time.
_mm_store_ss(output, vf);
// Accumulate 1 computed exponent.
vacc = _mm_add_ss(vacc, vf);
}
}
// Reduce 4 batch in the SIMD register
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
_mm_store_ss(sum, vacc);
}
| 12,193 | 43.341818 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x4(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m128 vi_max = _mm_load1_ps(max);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
__m128 vacc0 = _mm_setzero_ps();
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
// Load 4 (1x4) inputs at a time.
const __m128 vi0123 = _mm_loadu_ps(input);
input += 4;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0123 = _mm_mul_ps(vt0123, vs0123);
__m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
// Store 4 (1x4) outputs at a time.
_mm_storeu_ps(output, vf0123);
output += 4;
// Accumulate computed exponents.
vacc0 = _mm_add_ps(vacc0, vf0123);
}
__m128 vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
input += 4;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
// Store 4 outputs at a time.
_mm_storeu_ps(output, vf);
output += 4;
// Accumulate computed exponents.
vacc = _mm_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
if (batch & (2 * sizeof(float))) {
// Store 2 outputs at a time.
_mm_storel_pi((__m64*) output, vf);
output += 2;
// Accumulate 2 computed exponents.
vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
vf = _mm_movehl_ps(vf, vf);
}
if (batch & (1 * sizeof(float))) {
// Store 1 output at a time.
_mm_store_ss(output, vf);
// Accumulate 1 computed exponent.
vacc = _mm_add_ss(vacc, vf);
}
}
// Reduce 4 batch in the SIMD register
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
_mm_store_ss(sum, vacc);
}
| 8,366 | 38.654028 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x8_acc2(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const __m128 vi_max = _mm_load1_ps(max);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
__m128 vacc0 = _mm_setzero_ps();
__m128 vacc1 = _mm_setzero_ps();
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
// Load 8 (2x4) inputs at a time.
const __m128 vi0123 = _mm_loadu_ps(input);
const __m128 vi4567 = _mm_loadu_ps(input + 4);
input += 8;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt0123 = _mm_mul_ps(vt0123, vs0123);
vt4567 = _mm_mul_ps(vt4567, vs4567);
__m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
__m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
// Store 8 (2x4) outputs at a time.
_mm_storeu_ps(output, vf0123);
_mm_storeu_ps(output + 4, vf4567);
output += 8;
// Accumulate computed exponents.
vacc0 = _mm_add_ps(vacc0, vf0123);
vacc0 = _mm_add_ps(vacc0, vf4567);
}
// Add up all accumulators to vacc0
vacc0 = _mm_add_ps(vacc0, vacc1);
__m128 vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
input += 4;
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
// Store 4 outputs at a time.
_mm_storeu_ps(output, vf);
output += 4;
// Accumulate computed exponents.
vacc = _mm_add_ps(vacc, vf);
}
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
// Load 4 inputs at a time.
const __m128 vi = _mm_loadu_ps(input);
// Subtract maximum input x := i - i_max. This implies x <= 0.
const __m128 vx = _mm_sub_ps(vi, vi_max);
// Compute reduced argument batch := round(x / log(2)).
__m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
// Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
// -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
// Subtract the large number back to get final batch := round(x / log(2)).
vn = _mm_sub_ps(vn, vmagic_bias);
// Compute reduced argument t := x - batch * log(2).
// Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
// Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
__m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
// Reconstruct the final f value:
// f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
// = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
// = s + (t * s) * p
vt = _mm_mul_ps(vt, vs);
__m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
// For inputs below zero cutoff, replace output with +0.0f.
// Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
if (batch & (2 * sizeof(float))) {
// Store 2 outputs at a time.
_mm_storel_pi((__m64*) output, vf);
output += 2;
// Accumulate 2 computed exponents.
vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
vf = _mm_movehl_ps(vf, vf);
}
if (batch & (1 * sizeof(float))) {
// Store 1 output at a time.
_mm_store_ss(output, vf);
// Accumulate 1 computed exponent.
vacc = _mm_add_ss(vacc, vf);
}
}
// Reduce 4 batch in the SIMD register
vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
_mm_store_ss(sum, vacc);
}
| 9,434 | 40.021739 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-sse2-rr2-p5-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/sse2-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for a row of `batch` floats and
// stores the sum of all outputs into *sum ("radd + store + exp-minus-max").
// sse2_rr2_p5_x8: SSE2 intrinsics, Cody-Waite range reduction with two (rr2)
// constants representing log(2), degree-5 (p5) polynomial, 8 elements (x8)
// per main-loop iteration.
// NOTE(review): XNN_OOB_READS -- the 1..3-element remainder path below loads a
// full 4-float vector; presumably callers guarantee the trailing bytes are
// readable. Confirm against the caller contract.
void xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x8(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum; subtracting it keeps every exp() argument <= 0.
  const __m128 vi_max = _mm_load1_ps(max);
  // Approximation constants, precomputed by the caller in the params struct.
  const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p5.log2e);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p5.magic_bias);
  const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_hi);
  const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p5.minus_ln2_lo);
  const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p5.c5);
  const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p5.c4);
  const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p5.c3);
  const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p5.c2);
  const __m128 vc1 = _mm_load_ps(params->sse2_rr2_p5.c1);
  const __m128 vdenorm_cutoff = _mm_load_ps(params->sse2_rr2_p5.denorm_cutoff);
  // Running vector accumulator of all computed exponentials.
  __m128 vacc0 = _mm_setzero_ps();
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 (2x4) inputs at a time.
    const __m128 vi0123 = _mm_loadu_ps(input);
    const __m128 vi4567 = _mm_loadu_ps(input + 4);
    input += 8;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx0123 = _mm_sub_ps(vi0123, vi_max);
    const __m128 vx4567 = _mm_sub_ps(vi4567, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn0123 = _mm_add_ps(_mm_mul_ps(vx0123, vlog2e), vmagic_bias);
    __m128 vn4567 = _mm_add_ps(_mm_mul_ps(vx4567, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
    const __m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
    vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vx0123);
    __m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vx4567);
    vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc5, vt0123), vc4);
    __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc1);
    vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt0123 = _mm_mul_ps(vt0123, vs0123);
    vt4567 = _mm_mul_ps(vt4567, vs4567);
    __m128 vf0123 = _mm_add_ps(_mm_mul_ps(vt0123, vp0123), vs0123);
    __m128 vf4567 = _mm_add_ps(_mm_mul_ps(vt4567, vp4567), vs4567);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf0123 = _mm_andnot_ps(_mm_cmplt_ps(vx0123, vdenorm_cutoff), vf0123);
    vf4567 = _mm_andnot_ps(_mm_cmplt_ps(vx4567, vdenorm_cutoff), vf4567);
    // Store 8 (2x4) outputs at a time.
    _mm_storeu_ps(output, vf0123);
    _mm_storeu_ps(output + 4, vf4567);
    output += 8;
    // Accumulate computed exponents.
    vacc0 = _mm_add_ps(vacc0, vf0123);
    vacc0 = _mm_add_ps(vacc0, vf4567);
  }
  __m128 vacc = vacc0;
  // Single-vector (4-element) loop for the tail of the batch.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 inputs at a time.
    const __m128 vi = _mm_loadu_ps(input);
    input += 4;
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
    // Store 4 outputs at a time.
    _mm_storeu_ps(output, vf);
    output += 4;
    // Accumulate computed exponents.
    vacc = _mm_add_ps(vacc, vf);
  }
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Load 4 inputs at a time.
    // (Reads up to 3 floats past the last element -- see XNN_OOB_READS note above.)
    const __m128 vi = _mm_loadu_ps(input);
    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const __m128 vx = _mm_sub_ps(vi, vi_max);
    // Compute reduced argument batch := round(x / log(2)).
    __m128 vn = _mm_add_ps(_mm_mul_ps(vx, vlog2e), vmagic_bias);
    // Create a floating-point number s (scale) such that s == 2**batch for inputs which don't cause underflow, i.e.
    // -87.33642 <= x <= 0.0, and -126 <= batch <= 0 accordingly.
    const __m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
    // Subtract the large number back to get final batch := round(x / log(2)).
    vn = _mm_sub_ps(vn, vmagic_bias);
    // Compute reduced argument t := x - batch * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    __m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vx);
    vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].
    __m128 vp = _mm_add_ps(_mm_mul_ps(vc5, vt), vc4);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
    vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc1);
    // Reconstruct the final f value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p
    vt = _mm_mul_ps(vt, vs);
    __m128 vf = _mm_add_ps(_mm_mul_ps(vt, vp), vs);
    // For inputs below zero cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = _mm_andnot_ps(_mm_cmplt_ps(vx, vdenorm_cutoff), vf);
    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs at a time.
      _mm_storel_pi((__m64*) output, vf);
      output += 2;
      // Accumulate 2 computed exponents.
      vacc = _mm_add_ps(vacc, _mm_movelh_ps(vf, _mm_setzero_ps()));
      // Shift the remaining lanes down for the 1-element case below.
      vf = _mm_movehl_ps(vf, vf);
    }
    if (batch & (1 * sizeof(float))) {
      // Store 1 output at a time.
      _mm_store_ss(output, vf);
      // Accumulate 1 computed exponent.
      vacc = _mm_add_ss(vacc, vf);
    }
  }
  // Horizontal reduction: fold the 4 accumulator lanes into lane 0 and store the scalar sum.
  vacc = _mm_add_ps(vacc, _mm_movehl_ps(vacc, vacc));
  vacc = _mm_add_ss(vacc, _mm_shuffle_ps(vacc, vacc, _MM_SHUFFLE(2, 3, 0, 1)));
  _mm_store_ss(sum, vacc);
}
| 9,320 | 40.061674 | 116 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x12-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for a row of `batch` floats and
// stores the sum of all outputs into *sum ("radd + store + exp-minus-max").
// wasmrelaxedsimd_rr2_p5_x12_acc2: WAsm Relaxed SIMD intrinsics, two-constant
// (rr2) Cody-Waite range reduction for log(2), degree-5 (p5) polynomial,
// 12 elements (x12) per main-loop iteration, 2 partial-sum accumulators (acc2).
//
// Fix: the main loop previously accumulated all three result vectors into
// vacc0, so vacc1 was never written (dead) even though it was folded into the
// final reduction. Partial sums are now spread round-robin over both
// accumulators, as the acc2 variant intends (cf. the sibling acc3 kernel,
// which alternates vacc0/vacc1/vacc2).
// NOTE(review): XNN_OOB_READS -- the 1..3-element remainder path loads a full
// 4-float vector; presumably callers guarantee readable trailing bytes.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x12_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum; subtracting it keeps every exp() argument <= 0.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  // Approximation constants, precomputed by the caller in the params struct.
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  // Two independent accumulators to shorten the loop-carried FP add chain.
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    // Load 12 (3x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    input += 12;
    // x := i - i_max. This implies x <= 0.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    // n := round(x / log(2)) via the magic-bias trick (relaxed fused multiply-add).
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    // s := 2**n, built by shifting the biased n into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    // Subtract the magic bias back out to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    // t := x - n * log(2), Cody-Waite style with hi/lo halves of log(2).
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    // Degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    // Reconstruct f = s + (t * s) * p.
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    // Flush outputs for inputs below the denormal cutoff to +0.0f
    // (NaN inputs compare false and pass through unchanged).
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    output += 12;
    // Spread the partial sums round-robin over both accumulators
    // (previously all three were added to vacc0 and vacc1 stayed zero).
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc1 = wasm_f32x4_add(vacc1, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  v128_t vacc = vacc0;
  // Single-vector (4-element) loop for the tail of the batch.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the upper 64 bits onto the lower half,
  // then add lanes 0 and 1 to get the scalar running sum.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load; only the first 1-3 lanes are valid (XNN_OOB_READS).
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs, accumulate them, then shift remaining lanes down.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      // Store and accumulate the final output.
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 7,564 | 38.401042 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for a row of `batch` floats and
// stores the sum of all outputs into *sum ("radd + store + exp-minus-max").
// wasmrelaxedsimd_rr2_p5_x12_acc3: WAsm Relaxed SIMD intrinsics, two-constant
// (rr2) Cody-Waite range reduction for log(2), degree-5 (p5) polynomial,
// 12 elements (x12) per main-loop iteration, 3 partial-sum accumulators (acc3).
// NOTE(review): XNN_OOB_READS -- the 1..3-element remainder path loads a full
// 4-float vector; presumably callers guarantee readable trailing bytes.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x12_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum; subtracting it keeps every exp() argument <= 0.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  // Approximation constants, precomputed by the caller in the params struct.
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  // Three independent accumulators to shorten the loop-carried FP add chain.
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  v128_t vacc2 = vacc0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    // Load 12 (3x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    input += 12;
    // x := i - i_max. This implies x <= 0.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    // n := round(x / log(2)) via the magic-bias trick (relaxed fused multiply-add).
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    // s := 2**n, built by shifting the biased n into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    // Subtract the magic bias back out to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    // t := x - n * log(2), Cody-Waite style with hi/lo halves of log(2).
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    // Degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    // Reconstruct f = s + (t * s) * p.
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    // Flush outputs for inputs below the denormal cutoff to +0.0f
    // (NaN inputs compare false and pass through unchanged).
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    output += 12;
    // Spread the partial sums round-robin over the three accumulators.
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc1 = wasm_f32x4_add(vacc1, vf4567);
    vacc2 = wasm_f32x4_add(vacc2, vf89AB);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  vacc0 = wasm_f32x4_add(vacc0, vacc2);
  v128_t vacc = vacc0;
  // Single-vector (4-element) loop for the tail of the batch.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the upper 64 bits onto the lower half,
  // then add lanes 0 and 1 to get the scalar running sum.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load; only the first 1-3 lanes are valid (XNN_OOB_READS).
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs, accumulate them, then shift remaining lanes down.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      // Store and accumulate the final output.
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 7,628 | 38.324742 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for a row of `batch` floats and
// stores the sum of all outputs into *sum ("radd + store + exp-minus-max").
// wasmrelaxedsimd_rr2_p5_x12: WAsm Relaxed SIMD intrinsics, two-constant (rr2)
// Cody-Waite range reduction for log(2), degree-5 (p5) polynomial, 12 elements
// (x12) per main-loop iteration, single partial-sum accumulator.
// NOTE(review): XNN_OOB_READS -- the 1..3-element remainder path loads a full
// 4-float vector; presumably callers guarantee readable trailing bytes.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x12(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum; subtracting it keeps every exp() argument <= 0.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  // Approximation constants, precomputed by the caller in the params struct.
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  // Running vector accumulator of all computed exponentials.
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    // Load 12 (3x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    input += 12;
    // x := i - i_max. This implies x <= 0.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    // n := round(x / log(2)) via the magic-bias trick (relaxed fused multiply-add).
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    // s := 2**n, built by shifting the biased n into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    // Subtract the magic bias back out to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    // t := x - n * log(2), Cody-Waite style with hi/lo halves of log(2).
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    // Degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2].
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    // Reconstruct f = s + (t * s) * p.
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    // Flush outputs for inputs below the denormal cutoff to +0.0f
    // (NaN inputs compare false and pass through unchanged).
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    output += 12;
    // Accumulate computed exponentials.
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
  }
  v128_t vacc = vacc0;
  // Single-vector (4-element) loop for the tail of the batch.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the upper 64 bits onto the lower half,
  // then add lanes 0 and 1 to get the scalar running sum.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load; only the first 1-3 lanes are valid (XNN_OOB_READS).
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Store 2 outputs, accumulate them, then shift remaining lanes down.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      // Store and accumulate the final output.
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 7,457 | 38.460317 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element of the batch and
// writes the sum of all outputs to *sum (the softmax exp-and-reduce pass).
// Algorithm: "rr2_p5" — range reduction x = n*ln2 + t with ln2 split into
// hi/lo parts for extra precision, exp(t) approximated by a degree-5
// polynomial (c5..c1), and 2^n reconstructed by shifting n into the float
// exponent field.  Main loop: 16 floats/iteration; declared with 2
// accumulators (acc2).  Uses WASM relaxed-SIMD fused multiply-add builtins.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x16_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the precomputed maximum and all reduction/polynomial constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  // NOTE(review): vacc1 is initialized but never accumulated into in the main
  // loop below — all four partial sums go to vacc0, so the second accumulator
  // is effectively unused here.  Presumably a generator quirk; confirm against
  // the wasmsimd-rr2-p5.c.in template.
  v128_t vacc1 = vacc0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    input += 16;
    // x := i - max, so that exp(x) <= 1 for all elements.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    // n := round(x * log2(e)) via the magic-bias rounding trick.
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vxCDEF, vlog2e, vmagic_bias);
    // s := 2^n, built by shifting n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    // Remove the magic bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    // t := x - n*ln2, with ln2 applied as hi then lo part for extra precision.
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vxCDEF);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
    // Horner evaluation of p := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc5, vtCDEF, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc1);
    // f := s + (t*s)*p, i.e. exp(x) ~= s * (1 + t*p).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    v128_t vfCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
    // Flush to zero the lanes where x < denorm_cutoff (result would underflow).
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    // Store the exponentials and accumulate their sum.
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    output += 16;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc0 = wasm_f32x4_add(vacc0, vfCDEF);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  v128_t vacc = vacc0;
  // Remainder loop: 4 floats per iteration, same computation as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal sum: fold the upper 64 bits onto the lower, then add lanes 0 and 1.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Tail (1-3 floats): load a full vector (permitted by XNN_OOB_READS),
    // compute all 4 lanes, but store/accumulate only the valid ones.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 8,596 | 40.331731 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element of the batch and
// writes the sum of all outputs to *sum.  Same "rr2_p5" algorithm as the acc2
// variant: two-part (hi/lo) ln2 range reduction, degree-5 polynomial for
// exp(t), and 2^n built via an exponent-field shift.  Main loop: 16 floats
// per iteration; declared with 4 accumulators (acc4).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x16_acc4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the precomputed maximum and all reduction/polynomial constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  // NOTE(review): vacc1..vacc3 are initialized but never accumulated into in
  // the main loop — all partial sums go to vacc0, so the extra accumulators
  // are effectively unused here.  Presumably a generator quirk; confirm
  // against the wasmsimd-rr2-p5.c.in template.
  v128_t vacc1 = vacc0;
  v128_t vacc2 = vacc0;
  v128_t vacc3 = vacc0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    input += 16;
    // x := i - max, so that exp(x) <= 1 for all elements.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    // n := round(x * log2(e)) via the magic-bias rounding trick.
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vxCDEF, vlog2e, vmagic_bias);
    // s := 2^n, built by shifting n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    // Remove the magic bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    // t := x - n*ln2, with ln2 applied as hi then lo part for extra precision.
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vxCDEF);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
    // Horner evaluation of p := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc5, vtCDEF, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc1);
    // f := s + (t*s)*p, i.e. exp(x) ~= s * (1 + t*p).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    v128_t vfCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
    // Flush to zero the lanes where x < denorm_cutoff (result would underflow).
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    // Store the exponentials and accumulate their sum.
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    output += 16;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc0 = wasm_f32x4_add(vacc0, vfCDEF);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  vacc2 = wasm_f32x4_add(vacc2, vacc3);
  vacc0 = wasm_f32x4_add(vacc0, vacc2);
  v128_t vacc = vacc0;
  // Remainder loop: 4 floats per iteration, same computation as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal sum: fold the upper 64 bits onto the lower, then add lanes 0 and 1.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Tail (1-3 floats): load a full vector (permitted by XNN_OOB_READS),
    // compute all 4 lanes, but store/accumulate only the valid ones.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 8,724 | 40.15566 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element of the batch and
// writes the sum of all outputs to *sum.  Single-accumulator variant of the
// "rr2_p5" kernel: two-part (hi/lo) ln2 range reduction, degree-5 polynomial
// for exp(t), and 2^n built via an exponent-field shift.  Main loop handles
// 16 floats per iteration.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x16(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the precomputed maximum and all reduction/polynomial constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    input += 16;
    // x := i - max, so that exp(x) <= 1 for all elements.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    // n := round(x * log2(e)) via the magic-bias rounding trick.
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vxCDEF, vlog2e, vmagic_bias);
    // s := 2^n, built by shifting n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    // Remove the magic bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    // t := x - n*ln2, with ln2 applied as hi then lo part for extra precision.
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vxCDEF);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
    // Horner evaluation of p := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc5, vtCDEF, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc1);
    // f := s + (t*s)*p, i.e. exp(x) ~= s * (1 + t*p).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    v128_t vfCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
    // Flush to zero the lanes where x < denorm_cutoff (result would underflow).
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    // Store the exponentials and accumulate their sum.
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    output += 16;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc0 = wasm_f32x4_add(vacc0, vfCDEF);
  }
  v128_t vacc = vacc0;
  // Remainder loop: 4 floats per iteration, same computation as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal sum: fold the upper 64 bits onto the lower, then add lanes 0 and 1.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Tail (1-3 floats): load a full vector (permitted by XNN_OOB_READS),
    // compute all 4 lanes, but store/accumulate only the valid ones.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 8,489 | 40.414634 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x20-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element of the batch and
// writes the sum of all outputs to *sum.  Same "rr2_p5" algorithm as the x16
// variants: two-part (hi/lo) ln2 range reduction, degree-5 polynomial for
// exp(t), and 2^n built via an exponent-field shift.  Main loop: 20 floats
// (5 vectors of 4) per iteration; declared with 2 accumulators (acc2).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x20_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the precomputed maximum and all reduction/polynomial constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  // NOTE(review): vacc1 is initialized but never accumulated into in the main
  // loop below — all five partial sums go to vacc0, so the second accumulator
  // is effectively unused here.  Presumably a generator quirk; confirm against
  // the wasmsimd-rr2-p5.c.in template.
  v128_t vacc1 = vacc0;
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    // Load 20 (5x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    const v128_t viGHIJ = wasm_v128_load(input + 16);
    input += 20;
    // x := i - max, so that exp(x) <= 1 for all elements.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    const v128_t vxGHIJ = wasm_f32x4_sub(viGHIJ, vi_max);
    // n := round(x * log2(e)) via the magic-bias rounding trick.
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vxCDEF, vlog2e, vmagic_bias);
    v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vxGHIJ, vlog2e, vmagic_bias);
    // s := 2^n, built by shifting n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
    // Remove the magic bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
    // t := x - n*ln2, with ln2 applied as hi then lo part for extra precision.
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vxCDEF);
    v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_hi, vxGHIJ);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
    vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_lo, vtGHIJ);
    // Horner evaluation of p := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))).
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc5, vtCDEF, vc4);
    v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vc5, vtGHIJ, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc1);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc1);
    // f := s + (t*s)*p, i.e. exp(x) ~= s * (1 + t*p).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    v128_t vfCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
    v128_t vfGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vsGHIJ);
    // Flush to zero the lanes where x < denorm_cutoff (result would underflow).
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_lt(vxGHIJ, vdenorm_cutoff));
    // Store the exponentials and accumulate their sum.
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    wasm_v128_store(output + 16, vfGHIJ);
    output += 20;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc0 = wasm_f32x4_add(vacc0, vfCDEF);
    vacc0 = wasm_f32x4_add(vacc0, vfGHIJ);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  v128_t vacc = vacc0;
  // Remainder loop: 4 floats per iteration, same computation as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal sum: fold the upper 64 bits onto the lower, then add lanes 0 and 1.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Tail (1-3 floats): load a full vector (permitted by XNN_OOB_READS),
    // compute all 4 lanes, but store/accumulate only the valid ones.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 9,628 | 41.986607 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x20-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax building block: computes output[i] = exp(input[i] - *max) over a
// batch of floats, stores each result, and writes the sum of all results to
// *sum.  Subtracting the caller-provided maximum keeps exp() from overflowing
// when *max is the batch maximum (kernel contract -- computed by the caller).
//
// Per-lane algorithm (all single precision, relaxed fused multiply-adds):
//   n = round(x * log2(e))        -- rounded via the magic-bias trick
//   s = 2**n                      -- built by shifting n into the exponent
//   t = x + n*(-ln2_hi) + n*(-ln2_lo)  -- "rr2": ln(2) split for accuracy
//   p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)   -- degree-5 polynomial
//   f = s + (t*s)*p  ==  s*(1 + t*p)  ~=  exp(x)
// Results for x below denorm_cutoff are flushed to +0 via an andnot mask.
//
// Variant: 20 floats (5x4 vectors) per main-loop iteration, 5 independent
// partial accumulators to shorten the floating-point summation chain.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x20_acc5(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all reduction/polynomial constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  // Five partial sums, merged only after the main loop.
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  v128_t vacc2 = vacc0;
  v128_t vacc3 = vacc0;
  v128_t vacc4 = vacc0;
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    // Load 20 (5x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    const v128_t viGHIJ = wasm_v128_load(input + 16);
    input += 20;
    // x := i - max.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    const v128_t vxGHIJ = wasm_f32x4_sub(viGHIJ, vi_max);
    // n := round(x * log2e): adding the magic bias places the integer part in
    // the low mantissa bits of vn.
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vxCDEF, vlog2e, vmagic_bias);
    v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vxGHIJ, vlog2e, vmagic_bias);
    // s := 2**n, built by shifting n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
    // Undo the magic bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
    // t := x - n*ln2, applied as two fused steps (hi then lo halves of ln2).
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vxCDEF);
    v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_hi, vxGHIJ);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
    vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_lo, vtGHIJ);
    // Evaluate the degree-5 polynomial p(t) by Horner's scheme.
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc5, vtCDEF, vc4);
    v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vc5, vtGHIJ, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc1);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc1);
    // f := s + (t*s)*p == s*(1 + t*p).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    v128_t vfCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
    v128_t vfGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vsGHIJ);
    // Flush results to +0 where x lies below the denormal cutoff.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_lt(vxGHIJ, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    wasm_v128_store(output + 16, vfGHIJ);
    output += 20;
    // Round-robin the 5 tiles over the 5 accumulators (order 0,4,3,2,1).
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc4 = wasm_f32x4_add(vacc4, vf4567);
    vacc3 = wasm_f32x4_add(vacc3, vf89AB);
    vacc2 = wasm_f32x4_add(vacc2, vfCDEF);
    vacc1 = wasm_f32x4_add(vacc1, vfGHIJ);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  vacc2 = wasm_f32x4_add(vacc2, vacc3);
  vacc0 = wasm_f32x4_add(vacc0, vacc2);
  vacc0 = wasm_f32x4_add(vacc0, vacc4);
  v128_t vacc = vacc0;
  // Remainder: full vectors of 4 floats, same algorithm as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the upper 64-bit half onto the lower, then add
  // the two low lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Tail of 1..3 floats: a full vector load may read past the buffer end;
    // the kernel is annotated XNN_OOB_READS to allow this.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    // Store/accumulate only the valid lanes: 2 lanes first, then 1.
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 9,820 | 41.7 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax building block: computes output[i] = exp(input[i] - *max) over a
// batch of floats, stores each result, and writes the sum of all results to
// *sum.  Same rr2/p5 algorithm as the _acc5 sibling, but with a single
// accumulator:
//   n = round(x * log2(e))              (magic-bias rounding)
//   s = 2**n                            (n shifted into the exponent field)
//   t = x + n*(-ln2_hi) + n*(-ln2_lo)   (two-step ln2 reduction)
//   p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)
//   f = s + (t*s)*p  ==  s*(1 + t*p)  ~=  exp(x)
// Results for x below denorm_cutoff are flushed to +0 via an andnot mask.
// Variant: 20 floats (5x4 vectors) per main-loop iteration.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x20(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all reduction/polynomial constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    // Load 20 (5x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    const v128_t viGHIJ = wasm_v128_load(input + 16);
    input += 20;
    // x := i - max.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    const v128_t vxGHIJ = wasm_f32x4_sub(viGHIJ, vi_max);
    // n := round(x * log2e) via the magic-bias trick.
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vx89AB, vlog2e, vmagic_bias);
    v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vxCDEF, vlog2e, vmagic_bias);
    v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vxGHIJ, vlog2e, vmagic_bias);
    // s := 2**n, built by shifting n's low bits into the exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
    // Undo the magic bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
    // t := x - n*ln2, applied in hi and lo halves for extra precision.
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vx89AB);
    v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vxCDEF);
    v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_hi, vxGHIJ);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
    vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
    vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_lo, vtGHIJ);
    // Degree-5 polynomial p(t), Horner's scheme.
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc5, vt89AB, vc4);
    v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc5, vtCDEF, vc4);
    v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vc5, vtGHIJ, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc1);
    vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc1);
    vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc1);
    // f := s + (t*s)*p == s*(1 + t*p).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    v128_t vf89AB = __builtin_wasm_relaxed_madd_f32x4(vt89AB, vp89AB, vs89AB);
    v128_t vfCDEF = __builtin_wasm_relaxed_madd_f32x4(vtCDEF, vpCDEF, vsCDEF);
    v128_t vfGHIJ = __builtin_wasm_relaxed_madd_f32x4(vtGHIJ, vpGHIJ, vsGHIJ);
    // Flush results to +0 where x lies below the denormal cutoff.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_lt(vxGHIJ, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    wasm_v128_store(output + 16, vfGHIJ);
    output += 20;
    // Single accumulator: all 5 tiles feed vacc0.
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc0 = wasm_f32x4_add(vacc0, vfCDEF);
    vacc0 = wasm_f32x4_add(vacc0, vfGHIJ);
  }
  v128_t vacc = vacc0;
  // Remainder: full vectors of 4 floats, same algorithm as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold upper half onto lower, then add the two low lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Tail of 1..3 floats: full vector load may read past the end (XNN_OOB_READS).
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    // Store/accumulate only the valid lanes: 2 lanes first, then 1.
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 9,521 | 42.085973 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax building block: computes output[i] = exp(input[i] - *max) over a
// batch of floats, stores each result, and writes the sum of all results to
// *sum.  Same rr2/p5 algorithm as the wider siblings:
//   n = round(x * log2(e))              (magic-bias rounding)
//   s = 2**n                            (n shifted into the exponent field)
//   t = x + n*(-ln2_hi) + n*(-ln2_lo)   (two-step ln2 reduction)
//   p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)
//   f = s + (t*s)*p  ==  s*(1 + t*p)  ~=  exp(x)
// Results for x below denorm_cutoff are flushed to +0 via an andnot mask.
//
// Variant: 4 floats (1x4 vector) per loop iteration.  Note: the generated
// original contained a second, identical "remainder" loop after the main
// loop; since both consumed 4 floats per iteration under the same condition,
// the second loop was unreachable and has been removed (no behavior change).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all reduction/polynomial constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 (1x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    input += 4;
    // x := i - max.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    // n := round(x * log2e) via the magic-bias trick.
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    // s := 2**n, built by shifting n's low bits into the exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    // t := x - n*ln2, applied in hi and lo halves for extra precision.
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    // Degree-5 polynomial p(t), Horner's scheme.
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    // f := s + (t*s)*p == s*(1 + t*p).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    // Flush results to +0 where x lies below the denormal cutoff.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    output += 4;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
  }
  v128_t vacc = vacc0;
  // Horizontal reduction: fold upper half onto lower, then add the two low lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Tail of 1..3 floats: full vector load may read past the end (XNN_OOB_READS).
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    // Store/accumulate only the valid lanes: 2 lanes first, then 1.
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 5,391 | 33.343949 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Softmax building block: computes output[i] = exp(input[i] - *max) over a
// batch of floats, stores each result, and writes the sum of all results to
// *sum.  Same rr2/p5 algorithm as the siblings:
//   n = round(x * log2(e))              (magic-bias rounding)
//   s = 2**n                            (n shifted into the exponent field)
//   t = x + n*(-ln2_hi) + n*(-ln2_lo)   (two-step ln2 reduction)
//   p = ((((c5*t + c4)*t + c3)*t + c2)*t + c1)
//   f = s + (t*s)*p  ==  s*(1 + t*p)  ~=  exp(x)
// Results for x below denorm_cutoff are flushed to +0 via an andnot mask.
//
// Variant: 8 floats (2x4 vectors) per main-loop iteration with 2 partial
// accumulators.  Fix: the generated original added both tiles into vacc0 and
// left vacc1 permanently zero, defeating the purpose of the _acc2 variant and
// diverging from the x20_acc5 sibling's round-robin accumulator assignment;
// the second tile now accumulates into vacc1 (changes only the FP summation
// order of *sum, restoring the intended two independent chains).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x8_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all reduction/polynomial constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  // Two independent partial sums (merged after the main loop).
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 (2x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    input += 8;
    // x := i - max.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    // n := round(x * log2e) via the magic-bias trick.
    v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vlog2e, vmagic_bias);
    v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vlog2e, vmagic_bias);
    // s := 2**n, built by shifting n's low bits into the exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    // t := x - n*ln2, applied in hi and lo halves for extra precision.
    v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vx0123);
    v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vx4567);
    vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
    vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
    // Degree-5 polynomial p(t), Horner's scheme.
    v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt0123, vc4);
    v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc5, vt4567, vc4);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
    vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc1);
    vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc1);
    // f := s + (t*s)*p == s*(1 + t*p).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    v128_t vf0123 = __builtin_wasm_relaxed_madd_f32x4(vt0123, vp0123, vs0123);
    v128_t vf4567 = __builtin_wasm_relaxed_madd_f32x4(vt4567, vp4567, vs4567);
    // Flush results to +0 where x lies below the denormal cutoff.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    output += 8;
    // Round-robin the 2 tiles over the 2 accumulators.
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc1 = wasm_f32x4_add(vacc1, vf4567);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  v128_t vacc = vacc0;
  // Remainder: full vectors of 4 floats, same algorithm as above.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold upper half onto lower, then add the two low lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Tail of 1..3 floats: full vector load may read past the end (XNN_OOB_READS).
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vx, vlog2e, vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vx);
    vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
    v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc5, vt, vc4);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
    vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = __builtin_wasm_relaxed_madd_f32x4(vt, vp, vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    // Store/accumulate only the valid lanes: 2 lanes first, then 1.
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 6,528 | 36.096591 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmrelaxedsimd-rr2-p5-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element and accumulates
// the sum of all outputs into *sum — the exp-and-reduce step of a numerically
// stable softmax.
//
// "rr2_p5" scheme, processing 8 floats (2x4 lanes) per main-loop iteration:
//   n = round(x * log2(e))        (magic-bias rounding trick)
//   s = 2**n                      (n's low bits shifted into the FP exponent)
//   t = x - n*ln(2)               (-ln2 applied as hi+lo halves for accuracy)
//   exp(x) ~= s + (t*s) * p(t)    (p: degree-5 polynomial with coeffs c5..c1)
// Lanes whose reduced input x falls below vdenorm_cutoff are flushed to zero.
// XNN_OOB_READS: the tail path loads a full 16-byte vector even when fewer
// than 4 elements remain; the caller guarantees that read is safe.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmrelaxedsimd_rr2_p5_x8(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all algorithm constants to all 4 lanes.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e)
| 6,421 | 36.121387 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x12-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element and accumulates
// the sum of all outputs into *sum — the exp-and-reduce step of a numerically
// stable softmax.
//
// "rr2_p5" scheme, 12 floats (3x4 lanes) per main-loop iteration, with TWO
// partial-sum accumulators ("acc2") so consecutive vector adds do not form a
// single serial dependency chain:
//   n = round(x * log2(e))        (magic-bias rounding trick)
//   s = 2**n                      (n's low bits shifted into the FP exponent)
//   t = x - n*ln(2)               (-ln2 applied as hi+lo halves for accuracy)
//   exp(x) ~= s + (t*s) * p(t)    (p: degree-5 polynomial with coeffs c5..c1)
// Lanes whose reduced input x falls below vdenorm_cutoff are flushed to zero.
// XNN_OOB_READS: the tail path loads a full 16-byte vector even when fewer
// than 4 elements remain; the caller guarantees that read is safe.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x12_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all algorithm constants to all 4 lanes.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    // Load 12 (3x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    input += 12;
    // x = i - max (<= 0), so exp(x) cannot overflow.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    // n = round(x * log2e): the magic bias lands the rounded integer in the
    // low mantissa bits.
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
    // s = 2**n: shift n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    // Undo the bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    // t = x - n*ln2, with -ln2 split into hi/lo parts for extra precision.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
    // Horner evaluation of p(t) = ((((c5*t + c4)*t + c3)*t + c2)*t + c1).
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
    // f = s + (t*s) * p(t).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
    // Flush results of underflowing inputs to zero.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    output += 12;
    // Distribute the partial sums over both accumulators (this is the point
    // of the "acc2" variant); previously all three adds targeted vacc0,
    // leaving vacc1 permanently zero.
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc1 = wasm_f32x4_add(vacc1, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  v128_t vacc = vacc0;
  // Process remaining full groups of 4 elements.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the high 64 bits onto the low, then add the
  // two remaining lanes in scalar code.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load past the end is allowed (XNN_OOB_READS); only the
    // valid lanes are stored and summed below.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Store/accumulate lanes 0-1, then slide lanes 2-3 down.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 7,437 | 37.739583 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element and accumulates
// the sum of all outputs into *sum — the exp-and-reduce step of a numerically
// stable softmax.
//
// "rr2_p5" scheme, 12 floats (3x4 lanes) per main-loop iteration, with THREE
// partial-sum accumulators ("acc3") so consecutive vector adds do not form a
// single serial dependency chain:
//   n = round(x * log2(e))        (magic-bias rounding trick)
//   s = 2**n                      (n's low bits shifted into the FP exponent)
//   t = x - n*ln(2)               (-ln2 applied as hi+lo halves for accuracy)
//   exp(x) ~= s + (t*s) * p(t)    (p: degree-5 polynomial with coeffs c5..c1)
// Lanes whose reduced input x falls below vdenorm_cutoff are flushed to zero.
// XNN_OOB_READS: the tail path loads a full 16-byte vector even when fewer
// than 4 elements remain; the caller guarantees that read is safe.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x12_acc3(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all algorithm constants to all 4 lanes.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  v128_t vacc2 = vacc0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    // Load 12 (3x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    input += 12;
    // x = i - max (<= 0), so exp(x) cannot overflow.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    // n = round(x * log2e): the magic bias lands the rounded integer in the
    // low mantissa bits.
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
    // s = 2**n: shift n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    // Undo the bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    // t = x - n*ln2, with -ln2 split into hi/lo parts for extra precision.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
    // Horner evaluation of p(t) = ((((c5*t + c4)*t + c3)*t + c2)*t + c1).
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
    // f = s + (t*s) * p(t).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
    // Flush results of underflowing inputs to zero.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    output += 12;
    // One result vector per accumulator — keeps the add chains independent.
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc1 = wasm_f32x4_add(vacc1, vf4567);
    vacc2 = wasm_f32x4_add(vacc2, vf89AB);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  vacc0 = wasm_f32x4_add(vacc0, vacc2);
  v128_t vacc = vacc0;
  // Process remaining full groups of 4 elements.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the high 64 bits onto the low, then add the
  // two remaining lanes in scalar code.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load past the end is allowed (XNN_OOB_READS); only the
    // valid lanes are stored and summed below.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Store/accumulate lanes 0-1, then slide lanes 2-3 down.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 7,501 | 37.670103 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element and accumulates
// the sum of all outputs into *sum — the exp-and-reduce step of a numerically
// stable softmax.
//
// "rr2_p5" scheme, 12 floats (3x4 lanes) per main-loop iteration, single
// partial-sum accumulator:
//   n = round(x * log2(e))        (magic-bias rounding trick)
//   s = 2**n                      (n's low bits shifted into the FP exponent)
//   t = x - n*ln(2)               (-ln2 applied as hi+lo halves for accuracy)
//   exp(x) ~= s + (t*s) * p(t)    (p: degree-5 polynomial with coeffs c5..c1)
// Lanes whose reduced input x falls below vdenorm_cutoff are flushed to zero.
// XNN_OOB_READS: the tail path loads a full 16-byte vector even when fewer
// than 4 elements remain; the caller guarantees that read is safe.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x12(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all algorithm constants to all 4 lanes.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    // Load 12 (3x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    input += 12;
    // x = i - max (<= 0), so exp(x) cannot overflow.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    // n = round(x * log2e): the magic bias lands the rounded integer in the
    // low mantissa bits.
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
    // s = 2**n: shift n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    // Undo the bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    // t = x - n*ln2, with -ln2 split into hi/lo parts for extra precision.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
    // Horner evaluation of p(t) = ((((c5*t + c4)*t + c3)*t + c2)*t + c1).
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
    // f = s + (t*s) * p(t).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
    // Flush results of underflowing inputs to zero.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    output += 12;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
  }
  v128_t vacc = vacc0;
  // Process remaining full groups of 4 elements.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the high 64 bits onto the low, then add the
  // two remaining lanes in scalar code.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load past the end is allowed (XNN_OOB_READS); only the
    // valid lanes are stored and summed below.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Store/accumulate lanes 0-1, then slide lanes 2-3 down.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 7,330 | 37.78836 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every element and accumulates
// the sum of all outputs into *sum — the exp-and-reduce step of a numerically
// stable softmax.
//
// "rr2_p5" scheme, 16 floats (4x4 lanes) per main-loop iteration, with TWO
// partial-sum accumulators ("acc2") so consecutive vector adds do not form a
// single serial dependency chain:
//   n = round(x * log2(e))        (magic-bias rounding trick)
//   s = 2**n                      (n's low bits shifted into the FP exponent)
//   t = x - n*ln(2)               (-ln2 applied as hi+lo halves for accuracy)
//   exp(x) ~= s + (t*s) * p(t)    (p: degree-5 polynomial with coeffs c5..c1)
// Lanes whose reduced input x falls below vdenorm_cutoff are flushed to zero.
// XNN_OOB_READS: the tail path loads a full 16-byte vector even when fewer
// than 4 elements remain; the caller guarantees that read is safe.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x16_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);
  // Broadcast the row maximum and all algorithm constants to all 4 lanes.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    input += 16;
    // x = i - max (<= 0), so exp(x) cannot overflow.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    // n = round(x * log2e): the magic bias lands the rounded integer in the
    // low mantissa bits.
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vxCDEF, vlog2e), vmagic_bias);
    // s = 2**n: shift n's low bits into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    // Undo the bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    // t = x - n*ln2, with -ln2 split into hi/lo parts for extra precision.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vxCDEF);
    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
    // Horner evaluation of p(t) = ((((c5*t + c4)*t + c3)*t + c2)*t + c1).
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtCDEF), vc4);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc1);
    // f = s + (t*s) * p(t).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
    v128_t vfCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
    // Flush results of underflowing inputs to zero.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    output += 16;
    // Alternate the partial sums over both accumulators (this is the point
    // of the "acc2" variant); previously all four adds targeted vacc0,
    // leaving vacc1 permanently zero.
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc1 = wasm_f32x4_add(vacc1, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc1 = wasm_f32x4_add(vacc1, vfCDEF);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  v128_t vacc = vacc0;
  // Process remaining full groups of 4 elements.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the high 64 bits onto the low, then add the
  // two remaining lanes in scalar code.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    // Full-vector load past the end is allowed (XNN_OOB_READS); only the
    // valid lanes are stored and summed below.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Store/accumulate lanes 0-1, then slide lanes 2-3 down.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 8,445 | 39.605769 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every input element and also
// accumulates the scalar sum of all outputs into *sum — the fused
// "subtract-max, exponentiate, store, reduce" stage of a softmax.
//
// Method (rr2_p5): n = round(x * log2(e)) via the magic-bias rounding trick;
// 2**n is rebuilt by shifting n into the binary32 exponent field (shl 23);
// the reduced argument t = x + n*(-ln2) uses a two-step (hi/lo) Cody-Waite
// reduction; exp(t) is approximated by a degree-5 polynomial evaluated in
// Horner form. Inputs below the denorm cutoff are flushed to exactly 0
// via the andnot mask.
//
// Processes 16 floats (4 vectors of 4) per main-loop iteration and spreads
// the reduction across 4 independent accumulators (the "_acc4" variant) so
// consecutive vector adds do not form one serial dependency chain.
//
// batch:  number of input BYTES; must be a non-zero multiple of sizeof(float).
// max:    pointer to the precomputed maximum of the input (for stability).
// sum:    receives the scalar sum of all exp(input[i] - *max).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x16_acc4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  v128_t vacc2 = vacc0;
  v128_t vacc3 = vacc0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    input += 16;

    // Subtract the maximum so every argument to exp() is <= 0.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);

    // n = round(x * log2(e)) lands in the low mantissa bits after adding the
    // magic bias.
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vxCDEF, vlog2e), vmagic_bias);

    // s = 2**n, built by shifting n into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);

    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);

    // t = x - n*ln2, with ln2 split into hi/lo parts for extra precision.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vxCDEF);

    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);

    // Degree-5 polynomial p(t) ~= exp(t), Horner evaluation c5..c1.
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtCDEF), vc4);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc1);

    // f = s + (t*s) * p(t)  ==  2**n * exp(t).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);

    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
    v128_t vfCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);

    // Flush results to zero where x is below the denormal cutoff.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));

    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    output += 16;

    // Spread the partial sums across all 4 accumulators (the original code
    // funneled everything into vacc0, leaving vacc1..vacc3 dead and defeating
    // the purpose of the _acc4 variant). Distribution index follows the
    // sibling _acc5 kernel's (ACC - n) % ACC pattern.
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc3 = wasm_f32x4_add(vacc3, vf4567);
    vacc2 = wasm_f32x4_add(vacc2, vf89AB);
    vacc1 = wasm_f32x4_add(vacc1, vfCDEF);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);
  vacc2 = wasm_f32x4_add(vacc2, vacc3);
  vacc0 = wasm_f32x4_add(vacc0, vacc2);

  v128_t vacc = vacc0;
  // Process remaining whole vectors of 4 floats.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the upper 64 bits onto the lower, then add
  // the two remaining lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    // Tail of 1..3 floats; the full-vector load may read past the end,
    // which is allowed under XNN_OOB_READS.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 8,573 | 39.443396 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every input element and also
// accumulates the scalar sum of all outputs into *sum — the fused
// "subtract-max, exponentiate, store, reduce" stage of a softmax.
//
// Method (rr2_p5): n = round(x * log2(e)) via the magic-bias rounding trick;
// 2**n is rebuilt by shifting n into the binary32 exponent field (shl 23);
// the reduced argument t = x + n*(-ln2) uses a two-step (hi/lo) Cody-Waite
// reduction; exp(t) is approximated by a degree-5 polynomial evaluated in
// Horner form. Inputs below the denorm cutoff are flushed to exactly 0.
//
// Processes 16 floats (4 vectors of 4) per main-loop iteration with a single
// vector accumulator.
//
// batch:  number of input BYTES; must be a non-zero multiple of sizeof(float).
// max:    pointer to the precomputed maximum of the input (for stability).
// sum:    receives the scalar sum of all exp(input[i] - *max).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x16(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    // Load 16 (4x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    input += 16;

    // Subtract the maximum so every argument to exp() is <= 0.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);

    // n = round(x * log2(e)) lands in the low mantissa bits after adding the
    // magic bias.
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vxCDEF, vlog2e), vmagic_bias);

    // s = 2**n, built by shifting n into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);

    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);

    // t = x - n*ln2, with ln2 split into hi/lo parts for extra precision.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vxCDEF);

    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);

    // Degree-5 polynomial p(t) ~= exp(t), Horner evaluation c5..c1.
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtCDEF), vc4);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc1);

    // f = s + (t*s) * p(t)  ==  2**n * exp(t).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);

    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
    v128_t vfCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);

    // Flush results to zero where x is below the denormal cutoff.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));

    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    output += 16;

    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc0 = wasm_f32x4_add(vacc0, vfCDEF);
  }

  v128_t vacc = vacc0;
  // Process remaining whole vectors of 4 floats.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the upper 64 bits onto the lower, then add
  // the two remaining lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    // Tail of 1..3 floats; the full-vector load may read past the end,
    // which is allowed under XNN_OOB_READS.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 8,338 | 39.678049 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x20-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) for every input element and also
// accumulates the scalar sum of all outputs into *sum — the fused
// "subtract-max, exponentiate, store, reduce" stage of a softmax.
//
// Method (rr2_p5): n = round(x * log2(e)) via the magic-bias rounding trick;
// 2**n is rebuilt by shifting n into the binary32 exponent field (shl 23);
// the reduced argument t = x + n*(-ln2) uses a two-step (hi/lo) Cody-Waite
// reduction; exp(t) is approximated by a degree-5 polynomial evaluated in
// Horner form. Inputs below the denorm cutoff are flushed to exactly 0.
//
// Processes 20 floats (5 vectors of 4) per main-loop iteration and spreads
// the reduction over 2 independent accumulators (hence the _acc2 suffix) to
// shorten the add dependency chain.
//
// batch:  number of input BYTES; must be a non-zero multiple of sizeof(float).
// max:    pointer to the precomputed maximum of the input (for stability).
// sum:    receives the scalar sum of all exp(input[i] - *max).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x20_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    // Load 20 (5x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    const v128_t viGHIJ = wasm_v128_load(input + 16);
    input += 20;

    // Subtract the maximum so every argument to exp() is <= 0.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    const v128_t vxGHIJ = wasm_f32x4_sub(viGHIJ, vi_max);

    // n = round(x * log2(e)) lands in the low mantissa bits after adding the
    // magic bias.
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vxCDEF, vlog2e), vmagic_bias);
    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vxGHIJ, vlog2e), vmagic_bias);

    // s = 2**n, built by shifting n into the float exponent field.
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);

    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);

    // t = x - n*ln2, with ln2 split into hi/lo parts for extra precision.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vxCDEF);
    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vxGHIJ);

    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);

    // Degree-5 polynomial p(t) ~= exp(t), Horner evaluation c5..c1.
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtCDEF), vc4);
    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtGHIJ), vc4);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc1);
    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc1);

    // f = s + (t*s) * p(t)  ==  2**n * exp(t).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);

    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
    v128_t vfCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
    v128_t vfGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vsGHIJ);

    // Flush results to zero where x is below the denormal cutoff.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_lt(vxGHIJ, vdenorm_cutoff));

    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    wasm_v128_store(output + 16, vfGHIJ);
    output += 20;

    // Alternate partial sums between the two accumulators (the original code
    // funneled everything into vacc0, leaving vacc1 dead and defeating the
    // purpose of the _acc2 variant).
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc1 = wasm_f32x4_add(vacc1, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc1 = wasm_f32x4_add(vacc1, vfCDEF);
    vacc0 = wasm_f32x4_add(vacc0, vfGHIJ);
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);

  v128_t vacc = vacc0;
  // Process remaining whole vectors of 4 floats.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }
  // Horizontal reduction: fold the upper 64 bits onto the lower, then add
  // the two remaining lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
  if (batch != 0) {
    // Tail of 1..3 floats; the full-vector load may read past the end,
    // which is allowed under XNN_OOB_READS.
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 9,453 | 41.205357 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x20-acc5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x20_acc5(
size_t batch,
const float* input,
const float* max,
float* output,
float* sum,
const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(max != NULL);
assert(output != NULL);
assert(sum != NULL);
const v128_t vi_max = wasm_v128_load32_splat(max);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);
v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
v128_t vacc1 = vacc0;
v128_t vacc2 = vacc0;
v128_t vacc3 = vacc0;
v128_t vacc4 = vacc0;
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
// Load 20 (5x4) inputs at a time.
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
const v128_t vi89AB = wasm_v128_load(input + 8);
const v128_t viCDEF = wasm_v128_load(input + 12);
const v128_t viGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
const v128_t vxGHIJ = wasm_f32x4_sub(viGHIJ, vi_max);
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vxCDEF, vlog2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vxGHIJ, vlog2e), vmagic_bias);
const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vxCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vxGHIJ);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtCDEF), vc4);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtGHIJ), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc1);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc1);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
v128_t vfCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
v128_t vfGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vsGHIJ);
vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_lt(vxGHIJ, vdenorm_cutoff));
wasm_v128_store(output, vf0123);
wasm_v128_store(output + 4, vf4567);
wasm_v128_store(output + 8, vf89AB);
wasm_v128_store(output + 12, vfCDEF);
wasm_v128_store(output + 16, vfGHIJ);
output += 20;
vacc0 = wasm_f32x4_add(vacc0, vf0123);
vacc4 = wasm_f32x4_add(vacc4, vf4567);
vacc3 = wasm_f32x4_add(vacc3, vf89AB);
vacc2 = wasm_f32x4_add(vacc2, vfCDEF);
vacc1 = wasm_f32x4_add(vacc1, vfGHIJ);
}
// Add up all accumulators to vacc0
vacc0 = wasm_f32x4_add(vacc0, vacc1);
vacc2 = wasm_f32x4_add(vacc2, vacc3);
vacc0 = wasm_f32x4_add(vacc0, vacc2);
vacc0 = wasm_f32x4_add(vacc0, vacc4);
v128_t vacc = vacc0;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vi = wasm_v128_load(input);
input += 4;
const v128_t vx = wasm_f32x4_sub(vi, vi_max);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
vt = wasm_f32x4_mul(vt, vs);
v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
wasm_v128_store(output, vf);
output += 4;
vacc = wasm_f32x4_add(vacc, vf);
}
vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);
if (batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 3 * sizeof(float));
const v128_t vi = wasm_v128_load(input);
const v128_t vx = wasm_f32x4_sub(vi, vi_max);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
const v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
vt = wasm_f32x4_mul(vt, vs);
v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vf, 0);
output += 2;
vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vf, 0);
vsum += wasm_f32x4_extract_lane(vf, 0);
}
}
*sum = vsum;
}
| 9,645 | 40.93913 | 94 |
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.