repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
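// ELU microkernel: for each float x, y = beta * x when x >= 0 and
// y = alpha * (exp(prescale * x) - 1) when x < 0, with prescale/alpha/beta taken from params.
// AVX2/FMA vectors process 8 floats per iteration using a 4-entry exp LUT and a degree-4 polynomial.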
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
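// Pre-scale the input and clamp at the (negative) saturation cutoff, below which exp(z) - 1 has already saturated to -1 in float precision.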
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
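// Range reduction with the magic-bias trick: n = z * log2(e) + magic_bias rounds z / ln(2) into the low mantissa bits.
// The low 2 bits of n index the 4-entry LUT via _mm256_permutevar_ps, the remaining bits are shifted by 21 (= 23 - 2)
// into the float exponent field, and the integer sum of the two reassembles the scale factor s ~= 2^n.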
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
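// Undo the magic bias to recover n as a float and form the reduced argument t = z - n * ln(2).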
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
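// Degree-4 polynomial: p(t) = c2*t + c3*t^2 + c4*t^3, so exp(t) ~= 1 + t * (1 + p(t)).
// The FMAs below assemble e = alpha * (s * exp(t) - 1) = alpha * (exp(z) - 1).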
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
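// Select per lane by the sign bit of vx: the ELU branch ve for negative x, the linear branch beta * x otherwise.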
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
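// Remainder of 1..7 elements: build a per-lane mask from mask_table so the load only touches valid elements,
// then run the same computation as the main loop and store the tail piecewise below.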
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
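// Store the tail 4, 2, then 1 floats at a time, moving to the upper 128-bit half after the 4-element store.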
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 4,095 | 34.617391 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
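// One iteration handles 80 floats (10 AVX2 vectors); the per-vector math matches the x8 kernel above,
// interleaved across registers for instruction-level parallelism.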
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
__m256 vx8 = _mm256_loadu_ps(input + 64);
__m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
const __m256 vz9 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx9, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 21);
const __m256i vl6 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn6)));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 21);
const __m256i vl7 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn7)));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 21);
const __m256i vl8 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn8)));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256i ven9 = _mm256_slli_epi32(_mm256_castps_si256(vn9), 21);
const __m256i vl9 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn9)));
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vs9 = _mm256_castsi256_ps(_mm256_add_epi32(vl9, ven9));
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
__m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
__m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
__m256 vp8 = _mm256_fmadd_ps(vc4, vt8, vc3);
__m256 vp9 = _mm256_fmadd_ps(vc4, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vp8 = _mm256_mul_ps(vp8, vt8);
vt8 = _mm256_mul_ps(vt8, vs8);
vp9 = _mm256_mul_ps(vp9, vt9);
vt9 = _mm256_mul_ps(vt9, vs9);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
vs9 = _mm256_fmsub_ps(vs9, valpha, valpha);
vp9 = _mm256_fmadd_ps(vp9, vt9, vt9);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
vx8 = _mm256_mul_ps(vx8, vbeta);
const __m256 ve9 = _mm256_fmadd_ps(vp9, valpha, vs9);
vx9 = _mm256_mul_ps(vx9, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
const __m256 vy9 = _mm256_blendv_ps(vx9, ve9, vx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 13,913 | 43.883871 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
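// LUT8 variant: the low 3 bits of n index the 8-entry table with _mm256_permutevar8x32_epi32 and the
// remaining bits are shifted by 20 (= 23 - 3) into the exponent field; otherwise this matches the LUT4 kernels above.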
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 6,110 | 35.813253 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 7,067 | 37.413043 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,024 | 38.727723 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,981 | 39.827273 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 9,938 | 40.760504 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 20);
const __m256i vl6 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn6));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
__m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,895 | 41.5625 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 20);
const __m256i vl6 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn6));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 20);
const __m256i vl7 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn7));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
__m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
__m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,852 | 42.259124 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
__m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 20);
const __m256i vl6 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn6));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 20);
const __m256i vl7 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn7));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 20);
const __m256i vl8 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn8));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
__m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
__m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
__m256 vp8 = _mm256_fmadd_ps(vc4, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vp8 = _mm256_mul_ps(vp8, vt8);
vt8 = _mm256_mul_ps(vt8, vs8);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
vx8 = _mm256_mul_ps(vx8, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 12,809 | 42.869863 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
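    // z = max(sat_cutoff, prescale * x): clamping very negative inputs keeps the exponent
    // arithmetic below in range once expm1(z) has already saturated to -1.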
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
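    // Magic-bias trick: after this FMA the low mantissa bits of vn hold z/ln(2) in fixed point
    // with 3 fractional bits; the bottom 3 bits index the 8-entry table and the bits above them
    // become the exponent adjustment.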
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
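    // Reconstruct s ~ 2**(z/ln2): the table entry supplies the mantissa for the fractional part,
    // and ven, the integer bits shifted into the exponent field (23 - 3 = 20), supplies the scale.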
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
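    // The remaining FMAs fold s, t and the polynomial together so that ve below evaluates to
    // alpha * (exp(z) - 1) up to the approximation error.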
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
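    // blendv keys off the sign bit of beta * x (same as x for positive beta): negative lanes take
    // the exponential branch ve, the rest keep the linear branch beta * x.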
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 4,088 | 34.556522 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut8-p4-perm-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut8-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut8_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut8_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut8_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut8_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut8_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut8_p4.log2e);
const __m256i vtable = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut8_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.c2);
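  // This x80 variant differs from the narrower generated kernels only in its 10x unroll of the
  // main loop; the per-vector arithmetic is identical.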
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
__m256 vx8 = _mm256_loadu_ps(input + 64);
__m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
const __m256 vz9 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx9, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 20);
const __m256i vl0 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn0));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 20);
const __m256i vl1 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn1));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 20);
const __m256i vl2 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn2));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 20);
const __m256i vl3 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn3));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 20);
const __m256i vl4 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn4));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 20);
const __m256i vl5 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn5));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 20);
const __m256i vl6 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn6));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 20);
const __m256i vl7 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn7));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 20);
const __m256i vl8 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn8));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256i ven9 = _mm256_slli_epi32(_mm256_castps_si256(vn9), 20);
const __m256i vl9 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn9));
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vs9 = _mm256_castsi256_ps(_mm256_add_epi32(vl9, ven9));
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
__m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
__m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
__m256 vp8 = _mm256_fmadd_ps(vc4, vt8, vc3);
__m256 vp9 = _mm256_fmadd_ps(vc4, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vp8 = _mm256_mul_ps(vp8, vt8);
vt8 = _mm256_mul_ps(vt8, vs8);
vp9 = _mm256_mul_ps(vp9, vt9);
vt9 = _mm256_mul_ps(vt9, vs9);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
vs9 = _mm256_fmsub_ps(vs9, valpha, valpha);
vp9 = _mm256_fmadd_ps(vp9, vt9, vt9);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
vx8 = _mm256_mul_ps(vx8, vbeta);
const __m256 ve9 = _mm256_fmadd_ps(vp9, valpha, vs9);
vx9 = _mm256_mul_ps(vx9, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
const __m256 vy9 = _mm256_blendv_ps(vx9, ve9, vx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut8_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 13,766 | 43.409677 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
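    // p6 variant: no lookup table; vn is rounded to a whole exponent, 2**n comes from shifting
    // the low bits of vn straight into the float exponent field (<< 23), and a degree-6
    // polynomial in t covers the wider reduced range.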
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 5,836 | 33.538462 | 120 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
__m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 6,736 | 35.026738 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
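  // Main loop: 32 elements (4 AVX2 vectors of 8 floats) per iteration. Each vector
  // computes ELU(x) = alpha * (exp(prescale * x) - 1) for negative inputs and beta * x otherwise.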
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
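    // The magic bias leaves round(z * log2e) in the low mantissa bits of vn;
    // shifting those bits into the exponent field reconstructs the scale s = 2**n.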
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
__m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
__m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 7,636 | 36.253659 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x40.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
__m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
__m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
__m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
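    // Select e = alpha * (exp(z) - 1) where the sign bit of beta * x is set (negative inputs),
    // and beta * x otherwise.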
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,536 | 37.282511 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
__m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
__m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
__m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
__m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
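    // Build a per-lane load mask covering the 1-7 remaining elements from the end of mask_table.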
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 9,436 | 38.157676 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x56.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
__m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
__m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
__m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
__m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
__m256 vp6 = _mm256_fmadd_ps(vc6, vt6, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
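    // Store the remaining 1-7 results in chunks of 4, 2, and 1 floats, according to the low bits of batch.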
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,336 | 38.911197 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
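    // Horner evaluation of the degree-6 polynomial used to approximate exp(t) - 1 on the reduced argument t.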
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
__m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
__m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
__m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
__m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
__m256 vp6 = _mm256_fmadd_ps(vc6, vt6, vc5);
__m256 vp7 = _mm256_fmadd_ps(vc6, vt7, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,236 | 39.566787 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x72.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
__m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
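    // Clamp z = prescale * x from below at sat_cutoff; for more negative inputs ELU has
    // effectively saturated to -alpha, and clamping keeps the exp() reconstruction in range.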
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
__m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
__m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
__m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
__m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
__m256 vp6 = _mm256_fmadd_ps(vc6, vt6, vc5);
__m256 vp7 = _mm256_fmadd_ps(vc6, vt7, vc5);
__m256 vp8 = _mm256_fmadd_ps(vc6, vt8, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vp8 = _mm256_mul_ps(vp8, vt8);
vt8 = _mm256_mul_ps(vt8, vs8);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
vx8 = _mm256_mul_ps(vx8, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
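    // Write out the remaining 1-7 results in 4-, 2-, and 1-element chunks.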
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 12,136 | 40.142373 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
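    // Pre-scale the input and clamp it from below at sat_cutoff so the exp() approximation saturates for very negative inputs.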
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
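    // The magic bias leaves n = round(z*log2e) in the low bits of vn; shifting those bits into the exponent field reconstructs 2**n.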
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
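    // blendv selects by the sign bit of its last argument: lanes where beta*x is negative take the exponential branch ve, the rest keep beta*x.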
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
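// The following scalar reference is not part of XNNPACK; it is a minimal sketch of the
// transformation that these vectorized ELU kernels approximate:
//   y = beta * x                        if x >= 0
//   y = alpha * (exp(prescale * x) - 1) if x <  0
// The kernels additionally clamp prescale*x at sat_cutoff and replace exp() with a
// polynomial (or table-plus-polynomial) approximation; this sketch uses libm instead.
#include <math.h>
static inline float elu_scalar_reference(float x, float prescale, float alpha, float beta) {
  return x >= 0.0f ? x * beta : alpha * expm1f(x * prescale);
}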
| 3,926 | 32.853448 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-p6-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_p6_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
__m256 vx8 = _mm256_loadu_ps(input + 64);
__m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
const __m256 vz9 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx9, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
__m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vs9 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn9), 23));
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5);
__m256 vp1 = _mm256_fmadd_ps(vc6, vt1, vc5);
__m256 vp2 = _mm256_fmadd_ps(vc6, vt2, vc5);
__m256 vp3 = _mm256_fmadd_ps(vc6, vt3, vc5);
__m256 vp4 = _mm256_fmadd_ps(vc6, vt4, vc5);
__m256 vp5 = _mm256_fmadd_ps(vc6, vt5, vc5);
__m256 vp6 = _mm256_fmadd_ps(vc6, vt6, vc5);
__m256 vp7 = _mm256_fmadd_ps(vc6, vt7, vc5);
__m256 vp8 = _mm256_fmadd_ps(vc6, vt8, vc5);
__m256 vp9 = _mm256_fmadd_ps(vc6, vt9, vc5);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc4);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc4);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc4);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc3);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp9 = _mm256_fmadd_ps(vp9, vt9, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vp8 = _mm256_mul_ps(vp8, vt8);
vt8 = _mm256_mul_ps(vt8, vs8);
vp9 = _mm256_mul_ps(vp9, vt9);
vt9 = _mm256_mul_ps(vt9, vs9);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
vs9 = _mm256_fmsub_ps(vs9, valpha, valpha);
vp9 = _mm256_fmadd_ps(vp9, vt9, vt9);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
vx8 = _mm256_mul_ps(vx8, vbeta);
const __m256 ve9 = _mm256_fmadd_ps(vp9, valpha, vs9);
vx9 = _mm256_mul_ps(vx9, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
const __m256 vy9 = _mm256_blendv_ps(vx9, ve9, vx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
vp = _mm256_fmadd_ps(vp, vt, vc4);
vp = _mm256_fmadd_ps(vp, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 13,036 | 40.651757 | 120 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-lut16-p3-perm-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_lut16_p3.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_lut16_p3.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_lut16_p3.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_lut16_p3.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512i vtable = _mm512_load_si512(params->avx512_rr1_lut16_p3.table);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
__m512 vx4 = _mm512_loadu_ps(input + 64);
__m512 vx5 = _mm512_loadu_ps(input + 80);
__m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
const __m512 vz6 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx6, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i ven4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 19);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i ven5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 19);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i ven6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 19);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
__m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ven5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ven6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
__m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
__m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
__m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vc3, vt4, vc2);
__m512 vp5 = _mm512_fmadd_ps(vc3, vt5, vc2);
__m512 vp6 = _mm512_fmadd_ps(vc3, vt6, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vp4 = _mm512_mul_ps(vp4, vt4);
vt4 = _mm512_mul_ps(vt4, vs4);
vp5 = _mm512_mul_ps(vp5, vt5);
vt5 = _mm512_mul_ps(vt5, vs5);
vp6 = _mm512_mul_ps(vp6, vt6);
vt6 = _mm512_mul_ps(vt6, vs6);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
vs6 = _mm512_fmsub_ps(vs6, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vt6);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
__m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
__m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
__m512 vy6 = _mm512_fmadd_ps(vp6, valpha, vs6);
const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
vy6 = _mm512_mask_mul_ps(vy6, vsign6, vx6, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 10,558 | 43.365546 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-lut16-p3-perm-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_lut16_p3.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_lut16_p3.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_lut16_p3.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_lut16_p3.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512i vtable = _mm512_load_si512(params->avx512_rr1_lut16_p3.table);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
__m512 vx4 = _mm512_loadu_ps(input + 64);
__m512 vx5 = _mm512_loadu_ps(input + 80);
__m512 vx6 = _mm512_loadu_ps(input + 96);
__m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
const __m512 vz6 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx6, vprescale));
const __m512 vz7 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx7, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i ven4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 19);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i ven5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 19);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
const __m512i ven6 = _mm512_slli_epi32(_mm512_castps_si512(vn6), 19);
const __m512i vl6 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn6), vtable);
const __m512i ven7 = _mm512_slli_epi32(_mm512_castps_si512(vn7), 19);
const __m512i vl7 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn7), vtable);
__m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ven5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vs6 = _mm512_castsi512_ps(_mm512_add_epi32(vl6, ven6));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
__m512 vs7 = _mm512_castsi512_ps(_mm512_add_epi32(vl7, ven7));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vz7);
__m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
__m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
__m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
__m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vc3, vt4, vc2);
__m512 vp5 = _mm512_fmadd_ps(vc3, vt5, vc2);
__m512 vp6 = _mm512_fmadd_ps(vc3, vt6, vc2);
__m512 vp7 = _mm512_fmadd_ps(vc3, vt7, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vp4 = _mm512_mul_ps(vp4, vt4);
vt4 = _mm512_mul_ps(vt4, vs4);
vp5 = _mm512_mul_ps(vp5, vt5);
vt5 = _mm512_mul_ps(vt5, vs5);
vp6 = _mm512_mul_ps(vp6, vt6);
vt6 = _mm512_mul_ps(vt6, vs6);
vp7 = _mm512_mul_ps(vp7, vt7);
vt7 = _mm512_mul_ps(vt7, vs7);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
vs6 = _mm512_fmsub_ps(vs6, valpha, valpha);
vs7 = _mm512_fmsub_ps(vs7, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vt6);
vp7 = _mm512_fmadd_ps(vp7, vt7, vt7);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
__m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
__m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
__m512 vy6 = _mm512_fmadd_ps(vp6, valpha, vs6);
const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US);
__m512 vy7 = _mm512_fmadd_ps(vp7, valpha, vs7);
const __mmask16 vsign7 = _mm512_cmp_ps_mask(vx7, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
vy6 = _mm512_mask_mul_ps(vy6, vsign6, vx6, vbeta);
vy7 = _mm512_mask_mul_ps(vy7, vsign7, vx7, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 11,502 | 44.109804 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-lut16-p3-perm-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_lut16_p3.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_lut16_p3.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_lut16_p3.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_lut16_p3.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512i vtable = _mm512_load_si512(params->avx512_rr1_lut16_p3.table);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
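    // vsign marks lanes with non-negative x (NLT: not-less-than zero); those lanes are later overwritten with beta*x.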
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
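    // The magic bias fixes the binary point so that vn's low 4 bits index the 16-entry 2**(j/16) table, while the bits above them are shifted (23 - 4 = 19) into the exponent field.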
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 3,803 | 36.294118 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-lut16-p3-perm-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_lut16_p3.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_lut16_p3.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_lut16_p3.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_lut16_p3.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512i vtable = _mm512_load_si512(params->avx512_rr1_lut16_p3.table);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
__m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
__m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 5,843 | 37.196078 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-lut16-p3-perm-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_lut16_p3.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_lut16_p3.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_lut16_p3.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_lut16_p3.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512i vtable = _mm512_load_si512(params->avx512_rr1_lut16_p3.table);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
__m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
__m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
__m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 6,785 | 38.917647 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-lut16-p3-perm-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_lut16_p3.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_lut16_p3.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_lut16_p3.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_lut16_p3.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512i vtable = _mm512_load_si512(params->avx512_rr1_lut16_p3.table);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
__m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
__m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
__m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
__m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
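// --- Added note (not part of the generated file above) ---
// The kernel above evaluates ELU with a prescale/alpha/beta parameterization:
// y = x*beta for x >= 0, and y = alpha*(exp(x*prescale) - 1) otherwise (up to the
// sat_cutoff clamp on the negative branch). A minimal scalar reference for
// comparison; the parameter names mirror the fields read from `params` and are
// used here as assumptions, not as XNNPACK API.
#include <math.h>

static float elu_scalar_reference(float x, float prescale, float alpha, float beta) {
  return x >= 0.0f ? x * beta : alpha * expm1f(x * prescale);
}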
| 7,727 | 40.326203 | 105 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-lut16-p3-perm-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_lut16_p3.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_lut16_p3.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_lut16_p3.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_lut16_p3.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512i vtable = _mm512_load_si512(params->avx512_rr1_lut16_p3.table);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
__m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i ven4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 19);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
__m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
__m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
__m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
__m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vc3, vt4, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vp4 = _mm512_mul_ps(vp4, vt4);
vt4 = _mm512_mul_ps(vt4, vs4);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
__m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
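// --- Added note (not part of the generated file above) ---
// The remainder path above builds a 16-bit lane mask with the low `batch` bits set
// (after converting the byte count to an element count), so the masked load and
// store only touch the 1..15 valid floats. A hedged standalone sketch of that mask:
#include <stddef.h>
#include <stdint.h>

static uint16_t tail_mask16(size_t remaining_floats) {  // expected range: 1..15
  return (uint16_t) ((UINT32_C(1) << remaining_floats) - UINT32_C(1));
}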
| 8,669 | 41.5 | 105 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-lut16-p3-perm-x96.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-lut16-p3-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_lut16_p3.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_lut16_p3.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_lut16_p3.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_lut16_p3.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_lut16_p3.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_lut16_p3.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.c2);
const __m512i vtable = _mm512_load_si512(params->avx512_rr1_lut16_p3.table);
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
__m512 vx4 = _mm512_loadu_ps(input + 64);
__m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m512i ven0 = _mm512_slli_epi32(_mm512_castps_si512(vn0), 19);
const __m512i vl0 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn0), vtable);
const __m512i ven1 = _mm512_slli_epi32(_mm512_castps_si512(vn1), 19);
const __m512i vl1 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn1), vtable);
const __m512i ven2 = _mm512_slli_epi32(_mm512_castps_si512(vn2), 19);
const __m512i vl2 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn2), vtable);
const __m512i ven3 = _mm512_slli_epi32(_mm512_castps_si512(vn3), 19);
const __m512i vl3 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn3), vtable);
const __m512i ven4 = _mm512_slli_epi32(_mm512_castps_si512(vn4), 19);
const __m512i vl4 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn4), vtable);
const __m512i ven5 = _mm512_slli_epi32(_mm512_castps_si512(vn5), 19);
const __m512i vl5 = _mm512_permutexvar_epi32(_mm512_castps_si512(vn5), vtable);
__m512 vs0 = _mm512_castsi512_ps(_mm512_add_epi32(vl0, ven0));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_add_epi32(vl1, ven1));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_add_epi32(vl2, ven2));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_add_epi32(vl3, ven3));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vs4 = _mm512_castsi512_ps(_mm512_add_epi32(vl4, ven4));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vs5 = _mm512_castsi512_ps(_mm512_add_epi32(vl5, ven5));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vp0 = _mm512_fmadd_ps(vc3, vt0, vc2);
__m512 vp1 = _mm512_fmadd_ps(vc3, vt1, vc2);
__m512 vp2 = _mm512_fmadd_ps(vc3, vt2, vc2);
__m512 vp3 = _mm512_fmadd_ps(vc3, vt3, vc2);
__m512 vp4 = _mm512_fmadd_ps(vc3, vt4, vc2);
__m512 vp5 = _mm512_fmadd_ps(vc3, vt5, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vp4 = _mm512_mul_ps(vp4, vt4);
vt4 = _mm512_mul_ps(vt4, vs4);
vp5 = _mm512_mul_ps(vp5, vt5);
vt5 = _mm512_mul_ps(vt5, vs5);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
__m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
__m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m512i ven = _mm512_slli_epi32(_mm512_castps_si512(vn), 19);
const __m512i vl = _mm512_permutexvar_epi32(_mm512_castps_si512(vn), vtable);
__m512 vs = _mm512_castsi512_ps(_mm512_add_epi32(vl, ven));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc3, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
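// --- Added note (not part of the generated file above) ---
// The _CMP_NLT_US predicate above marks lanes where x is not less than zero (which
// also catches NaN lanes), and _mm512_mask_mul_ps overwrites exactly those lanes
// with x*beta while the remaining lanes keep the alpha*(e^z - 1) result. A scalar
// sketch of the same selection:
static float elu_select_sketch(float x, float negative_path, float beta) {
  return !(x < 0.0f) ? x * beta : negative_path;  // "not less than" keeps NaN on the x*beta path
}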
| 9,611 | 42.493213 | 105 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-p6-x112.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x112(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);
for (; batch >= 112 * sizeof(float); batch -= 112 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
__m512 vx4 = _mm512_loadu_ps(input + 64);
__m512 vx5 = _mm512_loadu_ps(input + 80);
__m512 vx6 = _mm512_loadu_ps(input + 96);
input += 112;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
const __m512 vz6 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx6, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
__m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
__m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
__m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
__m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5);
__m512 vp5 = _mm512_fmadd_ps(vc6, vt5, vc5);
__m512 vp6 = _mm512_fmadd_ps(vc6, vt6, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vp4 = _mm512_mul_ps(vp4, vt4);
vt4 = _mm512_mul_ps(vt4, vs4);
vp5 = _mm512_mul_ps(vp5, vt5);
vt5 = _mm512_mul_ps(vt5, vs5);
vp6 = _mm512_mul_ps(vp6, vt6);
vt6 = _mm512_mul_ps(vt6, vs6);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
vs6 = _mm512_fmsub_ps(vs6, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vt6);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
__m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
__m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
__m512 vy6 = _mm512_fmadd_ps(vp6, valpha, vs6);
const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
vy6 = _mm512_mask_mul_ps(vy6, vsign6, vx6, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
output += 112;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
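// --- Added note (not part of the generated file above) ---
// The p6 variant drops the lookup table and approximates exp(t) - 1 on the reduced
// argument with a degree-6 polynomial, evaluated by the FMA chain above. A scalar
// sketch of the same Horner order (the c2..c6 values are placeholders that would
// come from `params`); in the vector code the final fmadd uses t already scaled by
// 2**n, which folds the exponent reconstruction into this last step.
static float expm1_p6_sketch(float t, float c2, float c3, float c4, float c5, float c6) {
  float p = c6 * t + c5;
  p = p * t + c4;
  p = p * t + c3;
  p = p * t + c2;
  p = p * t;          // c2*t + c3*t^2 + ... + c6*t^5
  return p * t + t;   // t + c2*t^2 + ... + c6*t^6 ~= exp(t) - 1
}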
| 10,478 | 40.749004 | 105 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-p6-x128.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x128(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);
for (; batch >= 128 * sizeof(float); batch -= 128 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
__m512 vx4 = _mm512_loadu_ps(input + 64);
__m512 vx5 = _mm512_loadu_ps(input + 80);
__m512 vx6 = _mm512_loadu_ps(input + 96);
__m512 vx7 = _mm512_loadu_ps(input + 112);
input += 128;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
const __m512 vz6 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx6, vprescale));
const __m512 vz7 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx7, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vn6 = _mm512_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m512 vn7 = _mm512_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vs6 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn6), 23));
vn6 = _mm512_sub_ps(vn6, vmagic_bias);
__m512 vs7 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn7), 23));
vn7 = _mm512_sub_ps(vn7, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vt6 = _mm512_fmadd_ps(vn6, vminus_ln2, vz6);
__m512 vt7 = _mm512_fmadd_ps(vn7, vminus_ln2, vz7);
__m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
__m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
__m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
__m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
__m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5);
__m512 vp5 = _mm512_fmadd_ps(vc6, vt5, vc5);
__m512 vp6 = _mm512_fmadd_ps(vc6, vt6, vc5);
__m512 vp7 = _mm512_fmadd_ps(vc6, vt7, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc4);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc3);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm512_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm512_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vp4 = _mm512_mul_ps(vp4, vt4);
vt4 = _mm512_mul_ps(vt4, vs4);
vp5 = _mm512_mul_ps(vp5, vt5);
vt5 = _mm512_mul_ps(vt5, vs5);
vp6 = _mm512_mul_ps(vp6, vt6);
vt6 = _mm512_mul_ps(vt6, vs6);
vp7 = _mm512_mul_ps(vp7, vt7);
vt7 = _mm512_mul_ps(vt7, vs7);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
vs6 = _mm512_fmsub_ps(vs6, valpha, valpha);
vs7 = _mm512_fmsub_ps(vs7, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
vp6 = _mm512_fmadd_ps(vp6, vt6, vt6);
vp7 = _mm512_fmadd_ps(vp7, vt7, vt7);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
__m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
__m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
__m512 vy6 = _mm512_fmadd_ps(vp6, valpha, vs6);
const __mmask16 vsign6 = _mm512_cmp_ps_mask(vx6, vzero, _CMP_NLT_US);
__m512 vy7 = _mm512_fmadd_ps(vp7, valpha, vs7);
const __mmask16 vsign7 = _mm512_cmp_ps_mask(vx7, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
vy6 = _mm512_mask_mul_ps(vy6, vsign6, vx6, vbeta);
vy7 = _mm512_mask_mul_ps(vy7, vsign7, vx7, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
_mm512_storeu_ps(output + 96, vy6);
_mm512_storeu_ps(output + 112, vy7);
output += 128;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
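// --- Added usage sketch (an assumption, not part of the generated file above) ---
// `batch` is a byte count and must be a non-zero multiple of sizeof(float); the
// `params` union must be initialized for the avx512_rr1_p6 variant by the matching
// XNNPACK parameter-init routine before the micro-kernel is called.
#include <stddef.h>

static void run_elu_x128(const float* input, float* output, size_t num_floats,
                         const union xnn_f32_elu_params* params) {
  xnn_f32_velu_ukernel__avx512f_rr1_p6_x128(num_floats * sizeof(float), input, output, params);
}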
| 11,410 | 41.420074 | 105 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-p6-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
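// --- Added note (not part of the generated file above) ---
// In the p6 variant, 2**n is rebuilt by shifting the magic-biased vn left by 23 bits:
// assuming the magic bias is chosen so that the low bits of vn's payload hold the
// biased exponent 127 + n, the shift lands that value directly in the float exponent
// field with a zero mantissa. Scalar sketch of the bit manipulation only:
#include <stdint.h>
#include <string.h>

static float scale_from_magic_biased(float vn_with_magic_bias) {
  uint32_t bits;
  memcpy(&bits, &vn_with_magic_bias, sizeof(bits));
  bits <<= 23;  // low bits (assumed to be 127 + n) become the exponent field
  float s;
  memcpy(&s, &bits, sizeof(s));
  return s;     // 2**n while n stays within the normal-float exponent range
}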
| 3,805 | 34.90566 | 105 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-p6-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
input += 32;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
__m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
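// --- Added note (not part of the generated file above) ---
// The fmsub above turns s into alpha*(s - 1), so the final fmadd can assemble
// alpha*(e^z - 1) = alpha*s*expm1(t) + alpha*(s - 1) without a separate subtraction.
// Scalar sketch of that regrouping:
static float elu_negative_branch_sketch(float s /* 2**n */, float expm1_t, float alpha) {
  const float scaled = s * expm1_t;           // s*e^t - s
  const float shifted = s * alpha - alpha;    // alpha*(s - 1), the fmsub step
  return scaled * alpha + shifted;            // alpha*(e^z - 1)
}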
| 5,823 | 35.173913 | 105 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-p6-x48.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
input += 48;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
__m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
__m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
output += 48;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
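// --- Added note (not part of the generated file above) ---
// Clamping z to sat_cutoff before the exp approximation keeps n in range for the
// shift-based 2**n reconstruction; below the cutoff e^z is negligible, so the ELU
// output has already flattened to -alpha and the clamp costs no accuracy. Scalar sketch:
#include <math.h>

static float elu_saturating_sketch(float x, float prescale, float alpha, float sat_cutoff) {
  const float z = fmaxf(x * prescale, sat_cutoff);
  return alpha * expm1f(z);  // approaches -alpha once the clamp is active
}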
| 6,753 | 36.731844 | 105 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-p6-x64.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);
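  // Outline of the evaluation (shared by the unrolled and remainder loops below):
  //   z = max(sat_cutoff, prescale*x)   saturate large negative inputs
  //   n = round(z*log2(e))              magic-bias rounding trick
  //   s = 2**n                          integer bits of n shifted into the float exponent
  //   t = z - n*ln2                     reduced argument
  //   exp(z) - s ~ s*t*(1 + c2*t + ... + c6*t**5)   degree-6 polynomial in Horner form
  //   y = alpha*(exp(z) - 1) if x < 0, else beta*x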
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
input += 64;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
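    // The left shift by 23 moves the integer bits of vnN into the float exponent field,
    // giving vsN = 2**nN; subtracting the magic bias restores nN as a regular float for
    // the ln2 reduction below.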
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
__m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
__m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
__m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
output += 64;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 7,683 | 38.005076 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-p6-x80.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);
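  // Same evaluation as the other avx512f-rr1-p6 variants; the main loop is unrolled to
  // 80 elements (5 vectors of 16) per iteration.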
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
__m512 vx4 = _mm512_loadu_ps(input + 64);
input += 80;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
__m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
__m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
__m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
__m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vp4 = _mm512_mul_ps(vp4, vt4);
vt4 = _mm512_mul_ps(vt4, vs4);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
__m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
output += 80;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 8,613 | 39.065116 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-avx512f-rr1-p6-x96.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx512f-rr1-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x96(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);
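  // Same evaluation as the other avx512f-rr1-p6 variants; the main loop is unrolled to
  // 96 elements (6 vectors of 16) per iteration.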
for (; batch >= 96 * sizeof(float); batch -= 96 * sizeof(float)) {
__m512 vx0 = _mm512_loadu_ps(input);
__m512 vx1 = _mm512_loadu_ps(input + 16);
__m512 vx2 = _mm512_loadu_ps(input + 32);
__m512 vx3 = _mm512_loadu_ps(input + 48);
__m512 vx4 = _mm512_loadu_ps(input + 64);
__m512 vx5 = _mm512_loadu_ps(input + 80);
input += 96;
const __m512 vz0 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx0, vprescale));
const __m512 vz1 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx1, vprescale));
const __m512 vz2 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx2, vprescale));
const __m512 vz3 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx3, vprescale));
const __m512 vz4 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx4, vprescale));
const __m512 vz5 = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx5, vprescale));
__m512 vn0 = _mm512_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m512 vn1 = _mm512_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m512 vn2 = _mm512_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m512 vn5 = _mm512_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m512 vs0 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn0), 23));
vn0 = _mm512_sub_ps(vn0, vmagic_bias);
__m512 vs1 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn1), 23));
vn1 = _mm512_sub_ps(vn1, vmagic_bias);
__m512 vs2 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn2), 23));
vn2 = _mm512_sub_ps(vn2, vmagic_bias);
__m512 vs3 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn3), 23));
vn3 = _mm512_sub_ps(vn3, vmagic_bias);
__m512 vs4 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn4), 23));
vn4 = _mm512_sub_ps(vn4, vmagic_bias);
__m512 vs5 = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn5), 23));
vn5 = _mm512_sub_ps(vn5, vmagic_bias);
__m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
__m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
__m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
__m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
__m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);
__m512 vt5 = _mm512_fmadd_ps(vn5, vminus_ln2, vz5);
__m512 vp0 = _mm512_fmadd_ps(vc6, vt0, vc5);
__m512 vp1 = _mm512_fmadd_ps(vc6, vt1, vc5);
__m512 vp2 = _mm512_fmadd_ps(vc6, vt2, vc5);
__m512 vp3 = _mm512_fmadd_ps(vc6, vt3, vc5);
__m512 vp4 = _mm512_fmadd_ps(vc6, vt4, vc5);
__m512 vp5 = _mm512_fmadd_ps(vc6, vt5, vc5);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc4);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc4);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc4);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc4);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc4);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc3);
vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm512_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm512_mul_ps(vp0, vt0);
vt0 = _mm512_mul_ps(vt0, vs0);
vp1 = _mm512_mul_ps(vp1, vt1);
vt1 = _mm512_mul_ps(vt1, vs1);
vp2 = _mm512_mul_ps(vp2, vt2);
vt2 = _mm512_mul_ps(vt2, vs2);
vp3 = _mm512_mul_ps(vp3, vt3);
vt3 = _mm512_mul_ps(vt3, vs3);
vp4 = _mm512_mul_ps(vp4, vt4);
vt4 = _mm512_mul_ps(vt4, vs4);
vp5 = _mm512_mul_ps(vp5, vt5);
vt5 = _mm512_mul_ps(vt5, vs5);
vs0 = _mm512_fmsub_ps(vs0, valpha, valpha);
vs1 = _mm512_fmsub_ps(vs1, valpha, valpha);
vs2 = _mm512_fmsub_ps(vs2, valpha, valpha);
vs3 = _mm512_fmsub_ps(vs3, valpha, valpha);
vs4 = _mm512_fmsub_ps(vs4, valpha, valpha);
vs5 = _mm512_fmsub_ps(vs5, valpha, valpha);
vp0 = _mm512_fmadd_ps(vp0, vt0, vt0);
vp1 = _mm512_fmadd_ps(vp1, vt1, vt1);
vp2 = _mm512_fmadd_ps(vp2, vt2, vt2);
vp3 = _mm512_fmadd_ps(vp3, vt3, vt3);
vp4 = _mm512_fmadd_ps(vp4, vt4, vt4);
vp5 = _mm512_fmadd_ps(vp5, vt5, vt5);
const __m512 vzero = _mm512_setzero_ps();
__m512 vy0 = _mm512_fmadd_ps(vp0, valpha, vs0);
const __mmask16 vsign0 = _mm512_cmp_ps_mask(vx0, vzero, _CMP_NLT_US);
__m512 vy1 = _mm512_fmadd_ps(vp1, valpha, vs1);
const __mmask16 vsign1 = _mm512_cmp_ps_mask(vx1, vzero, _CMP_NLT_US);
__m512 vy2 = _mm512_fmadd_ps(vp2, valpha, vs2);
const __mmask16 vsign2 = _mm512_cmp_ps_mask(vx2, vzero, _CMP_NLT_US);
__m512 vy3 = _mm512_fmadd_ps(vp3, valpha, vs3);
const __mmask16 vsign3 = _mm512_cmp_ps_mask(vx3, vzero, _CMP_NLT_US);
__m512 vy4 = _mm512_fmadd_ps(vp4, valpha, vs4);
const __mmask16 vsign4 = _mm512_cmp_ps_mask(vx4, vzero, _CMP_NLT_US);
__m512 vy5 = _mm512_fmadd_ps(vp5, valpha, vs5);
const __mmask16 vsign5 = _mm512_cmp_ps_mask(vx5, vzero, _CMP_NLT_US);
vy0 = _mm512_mask_mul_ps(vy0, vsign0, vx0, vbeta);
vy1 = _mm512_mask_mul_ps(vy1, vsign1, vx1, vbeta);
vy2 = _mm512_mask_mul_ps(vy2, vsign2, vx2, vbeta);
vy3 = _mm512_mask_mul_ps(vy3, vsign3, vx3, vbeta);
vy4 = _mm512_mask_mul_ps(vy4, vsign4, vx4, vbeta);
vy5 = _mm512_mask_mul_ps(vy5, vsign5, vx5, vbeta);
_mm512_storeu_ps(output, vy0);
_mm512_storeu_ps(output + 16, vy1);
_mm512_storeu_ps(output + 32, vy2);
_mm512_storeu_ps(output + 48, vy3);
_mm512_storeu_ps(output + 64, vy4);
_mm512_storeu_ps(output + 80, vy5);
output += 96;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vx = _mm512_loadu_ps(input);
input += 16;
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_storeu_ps(output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vx = _mm512_maskz_loadu_ps(vmask, input);
const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);
__m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
__m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
vn = _mm512_sub_ps(vn, vmagic_bias);
__m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
__m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
vp = _mm512_fmadd_ps(vp, vt, vc4);
vp = _mm512_fmadd_ps(vp, vt, vc3);
vp = _mm512_fmadd_ps(vp, vt, vc2);
vp = _mm512_mul_ps(vp, vt);
vt = _mm512_mul_ps(vt, vs);
vs = _mm512_fmsub_ps(vs, valpha, valpha);
vp = _mm512_fmadd_ps(vp, vt, vt);
__m512 vy = _mm512_fmadd_ps(vp, valpha, vs);
vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);
_mm512_mask_storeu_ps(output, vmask, vy);
}
}
| 9,543 | 39.961373 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-lut16-p3-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.minus_ln2_lo);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
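  // Outline of the evaluation (rr2 = two-constant ln2 reduction, lut16 = 16-entry exp2
  // table, p3 = degree-3 polynomial):
  //   z = max(prescale*x, sat_cutoff)
  //   n = round(z*16/ln2)     magic-bias rounding; n stays in the low bits of the float
  //   s = 2**(n/16)           low 4 bits of n index xnn_table_exp2minus_k_over_16, the
  //                           remaining bits (shifted left by 19) form the exponent, and
  //                           an integer add combines the two
  //   t = z - n*(ln2/16)      hi/lo reduction for extra accuracy
  //   exp(z) - s ~ s*t*(1 + c2*t + c3*t**2)
  //   y = alpha*(exp(z) - 1) if x < 0, else beta*x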
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
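    // Gather the table entries two lanes at a time: each 64-bit lane of vidx* packs two
    // byte offsets into xnn_table_exp2minus_k_over_16.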
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
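    // 1 to 3 floats remain; the full-width load below may read past the end of the input,
    // which is permitted because this kernel is declared XNN_OOB_READS.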
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
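    // Store the 1-3 remaining results: a pair of lanes first when at least 2 remain,
    // then a single lane when the count is odd.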
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 10,823 | 47.977376 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-lut16-p3-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.minus_ln2_lo);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
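  // Same evaluation as the other neon-rr2-lut16-p3 variants; the main loop handles
  // 16 elements (4 vectors) per iteration.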
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc2, vc3, vtCDEF);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 12,642 | 50.186235 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-lut16-p3-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.minus_ln2_lo);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
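  // Same evaluation as the other neon-rr2-lut16-p3 variants; the main loop handles
  // 20 elements (5 vectors) per iteration.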
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc2, vc3, vtCDEF);
float32x4_t vpGHIJ = vmlaq_f32(vc2, vc3, vtGHIJ);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vsGHIJ = vsubq_f32(vsGHIJ, vone);
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
vst1q_f32(output, vyGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 14,461 | 51.974359 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-lut16-p3-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vprescale = vld1q_dup_f32(&params->neon_rr2_lut16_p3.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->neon_rr2_lut16_p3.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->neon_rr2_lut16_p3.beta);
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_rr2_lut16_p3.sat_cutoff);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut16_p3.magic_bias);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut16_p3.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut16_p3.minus_ln2_lo);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_lut16_p3.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
float32x4_t vxKLMN = vld1q_f32(input); input += 4;
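    // z := prescale*x, clamped at sat_cutoff, below which expm1(z) rounds to -1 and the ELU output saturates to -alpha.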
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
const float32x4_t vzKLMN = vmaxq_f32(vmulq_f32(vxKLMN, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);
float32x4_t vnKLMN = vmlaq_f32(vmagic_bias, vzKLMN, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask), 2));
const int32x4_t venKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 19);
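    // Gather the table entries: each 64-bit lane of the index vectors packs two 32-bit byte offsets for scalar lane loads.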
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
int32x2_t vlKL = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxKL));
int32x2_t vlMN = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxMN));
vlKL = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxKL >> 32)), vlKL, 1);
vlMN = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxMN >> 32)), vlMN, 1);
const int32x4_t vlKLMN = vcombine_s32(vlKL, vlMN);
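    // Recover n by removing the magic bias and rebuild s ~= 2**n by adding the shifted exponent bits to the table values.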
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vlKLMN, venKLMN));
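    // Range reduction t := z - n*ln(2); ln(2) is split into high and low parts (rr2) to limit rounding error.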
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);
float32x4_t vtKLMN = vmlaq_f32(vzKLMN, vnKLMN, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
vtKLMN = vmlaq_f32(vtKLMN, vnKLMN, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc2, vc3, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc2, vc3, vtCDEF);
float32x4_t vpGHIJ = vmlaq_f32(vc2, vc3, vtGHIJ);
float32x4_t vpKLMN = vmlaq_f32(vc2, vc3, vtKLMN);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
vpKLMN = vmulq_f32(vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vsGHIJ = vsubq_f32(vsGHIJ, vone);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
vsKLMN = vsubq_f32(vsKLMN, vone);
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vmlaq_f32(vtKLMN, vpKLMN, vtKLMN);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
const float32x4_t veKLMN = vmulq_f32(vaddq_f32(vpKLMN, vsKLMN), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vxKLMN = vmulq_f32(vxKLMN, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
const float32x4_t vyKLMN = vbslq_f32(vmKLMN, veKLMN, vxKLMN);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
vst1q_f32(output, vyGHIJ); output += 4;
vst1q_f32(output, vyKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 16,280 | 53.451505 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-lut16-p3-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vprescale = vld1q_dup_f32(&params->neon_rr2_lut16_p3.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->neon_rr2_lut16_p3.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->neon_rr2_lut16_p3.beta);
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_rr2_lut16_p3.sat_cutoff);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut16_p3.magic_bias);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut16_p3.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut16_p3.minus_ln2_lo);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_lut16_p3.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
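    // Store the 1-3 leftover elements: a 2-lane store first if needed, then a single lane.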
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 5,277 | 40.888889 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-lut16-p3-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neon_rr2_lut16_p3_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vprescale = vld1q_dup_f32(&params->neon_rr2_lut16_p3.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->neon_rr2_lut16_p3.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->neon_rr2_lut16_p3.beta);
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_rr2_lut16_p3.sat_cutoff);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut16_p3.magic_bias);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_lut16_p3.log2e);
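  // 0xF mask: the low 4 bits of n index the 16-entry exp2 lookup table.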
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_lut16_p3.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_lut16_p3.minus_ln2_lo);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_lut16_p3.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc2, vc3, vt4567);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
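    // Full-vector load of the 1-3 remaining elements; reading past the end is allowed here (the kernel is declared XNN_OOB_READS).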
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vmlaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 9,001 | 45.164103 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-p6-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neon_rr2_p6_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vprescale = vld1q_dup_f32(&params->neon_rr2_p6.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->neon_rr2_p6.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->neon_rr2_p6.beta);
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_rr2_p6.sat_cutoff);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p6.magic_bias);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p6.log2e);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p6.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p6.minus_ln2_lo);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_rr2_p6.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p6.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p6.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p6.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
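    // p6 variant: no lookup table; the (pre-biased) integer bits of n are shifted straight into the exponent field to form s ~= 2**n.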
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
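    // Degree-6 polynomial approximation of exp(t) - 1 ~= t + c2*t^2 + ... + c6*t^6, evaluated in Horner form over the next steps.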
float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 7,280 | 37.728723 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-p6-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neon_rr2_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vprescale = vld1q_dup_f32(&params->neon_rr2_p6.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->neon_rr2_p6.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->neon_rr2_p6.beta);
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_rr2_p6.sat_cutoff);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p6.magic_bias);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p6.log2e);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p6.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p6.minus_ln2_lo);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_rr2_p6.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p6.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p6.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p6.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
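  // Main loop: processes 16 elements (four 4-lane vectors) per iteration.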
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc5, vc6, vtCDEF);
vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc4, vpCDEF, vtCDEF);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 8,397 | 39.375 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-p6-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neon_rr2_p6_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vprescale = vld1q_dup_f32(&params->neon_rr2_p6.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->neon_rr2_p6.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->neon_rr2_p6.beta);
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_rr2_p6.sat_cutoff);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p6.magic_bias);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p6.log2e);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p6.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p6.minus_ln2_lo);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_rr2_p6.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p6.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p6.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p6.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc5, vc6, vtCDEF);
float32x4_t vpGHIJ = vmlaq_f32(vc5, vc6, vtGHIJ);
vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc4, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc4, vpGHIJ, vtGHIJ);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vsGHIJ = vsubq_f32(vsGHIJ, vone);
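    // s was reduced to s - 1 so that p + (s - 1) evaluates s*exp(t) - 1 accurately even when the result is close to zero.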
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
vst1q_f32(output, vyGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 9,514 | 40.732456 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-p6-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neon_rr2_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vprescale = vld1q_dup_f32(&params->neon_rr2_p6.prescale);
  const float32x4_t valpha = vld1q_dup_f32(&params->neon_rr2_p6.alpha);
  const float32x4_t vbeta = vld1q_dup_f32(&params->neon_rr2_p6.beta);
  const float32x4_t vsat_cutoff = vld1q_dup_f32(&params->neon_rr2_p6.sat_cutoff);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p6.magic_bias);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p6.log2e);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p6.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p6.minus_ln2_lo);
  const float32x4_t vc6 = vld1q_dup_f32(&params->neon_rr2_p6.c6);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p6.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p6.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p6.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
const float32x4_t vzKLMN = vmaxq_f32(vmulq_f32(vxKLMN, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vnGHIJ = vmlaq_f32(vmagic_bias, vzGHIJ, vlog2e);
float32x4_t vnKLMN = vmlaq_f32(vmagic_bias, vzKLMN, vlog2e);
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vminus_ln2_hi);
float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vminus_ln2_hi);
float32x4_t vtGHIJ = vmlaq_f32(vzGHIJ, vnGHIJ, vminus_ln2_hi);
float32x4_t vtKLMN = vmlaq_f32(vzKLMN, vnKLMN, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
vt89AB = vmlaq_f32(vt89AB, vn89AB, vminus_ln2_lo);
vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vminus_ln2_lo);
vtGHIJ = vmlaq_f32(vtGHIJ, vnGHIJ, vminus_ln2_lo);
vtKLMN = vmlaq_f32(vtKLMN, vnKLMN, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vmlaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vmlaq_f32(vc5, vc6, vtCDEF);
float32x4_t vpGHIJ = vmlaq_f32(vc5, vc6, vtGHIJ);
float32x4_t vpKLMN = vmlaq_f32(vc5, vc6, vtKLMN);
vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
vp89AB = vmlaq_f32(vc4, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc4, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc4, vpGHIJ, vtGHIJ);
vpKLMN = vmlaq_f32(vc4, vpKLMN, vtKLMN);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp89AB = vmlaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc3, vpGHIJ, vtGHIJ);
vpKLMN = vmlaq_f32(vc3, vpKLMN, vtKLMN);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp89AB = vmlaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vc2, vpGHIJ, vtGHIJ);
vpKLMN = vmlaq_f32(vc2, vpKLMN, vtKLMN);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
vpKLMN = vmulq_f32(vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vsGHIJ = vsubq_f32(vsGHIJ, vone);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
vsKLMN = vsubq_f32(vsKLMN, vone);
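    // Reconstruct expm1: with t' = t*s and s' = s - 1 computed above,
    // p <- p*t' + t', so that (p + s') ~= exp(z) - 1.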
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
vp89AB = vmlaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vmlaq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vmlaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vmlaq_f32(vtKLMN, vpKLMN, vtKLMN);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
const float32x4_t veKLMN = vmulq_f32(vaddq_f32(vpKLMN, vsKLMN), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vxKLMN = vmulq_f32(vxKLMN, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
const float32x4_t vyKLMN = vbslq_f32(vmKLMN, veKLMN, vxKLMN);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
vst1q_f32(output, vyGHIJ); output += 4;
vst1q_f32(output, vyKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 10,631 | 41.870968 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-p6-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neon_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neon_rr2_p6.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neon_rr2_p6.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neon_rr2_p6.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_rr2_p6.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_p6.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_p6.log2e);
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_p6.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_p6.minus_ln2_lo);
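  // Editor note: the hi/lo pair above implements the "rr2" two-term range reduction --
  // ln(2) is split into high and low parts so t = z - n*ln(2) retains extra precision.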
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_rr2_p6.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_rr2_p6.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_rr2_p6.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_p6.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 3,839 | 33.909091 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neon-rr2-p6-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neon_rr2_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neon_rr2_p6.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neon_rr2_p6.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neon_rr2_p6.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neon_rr2_p6.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neon_rr2_p6.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neon_rr2_p6.log2e);
const float32x4_t vminus_ln2_hi = vld1q_dup_f32(¶ms->neon_rr2_p6.minus_ln2_hi);
const float32x4_t vminus_ln2_lo = vld1q_dup_f32(¶ms->neon_rr2_p6.minus_ln2_lo);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neon_rr2_p6.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neon_rr2_p6.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neon_rr2_p6.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neon_rr2_p6.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neon_rr2_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vlog2e);
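    // The magic bias leaves round(z * log2(e)) in the low mantissa bits of vn;
    // shifting those bits left by 23 moves them into the exponent field, so vs = 2**n.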
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vminus_ln2_hi);
float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vminus_ln2_hi);
vt0123 = vmlaq_f32(vt0123, vn0123, vminus_ln2_lo);
vt4567 = vmlaq_f32(vt4567, vn4567, vminus_ln2_lo);
float32x4_t vp0123 = vmlaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);
vp0123 = vmlaq_f32(vc4, vp0123, vt0123);
vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
vp0123 = vmlaq_f32(vc3, vp0123, vt0123);
vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
vp0123 = vmlaq_f32(vc2, vp0123, vt0123);
vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vp0123 = vmlaq_f32(vt0123, vp0123, vt0123);
vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vmlaq_f32(vz, vn, vminus_ln2_hi);
vt = vmlaq_f32(vt, vn, vminus_ln2_lo);
float32x4_t vp = vmlaq_f32(vc5, vc6, vt);
vp = vmlaq_f32(vc4, vp, vt);
vp = vmlaq_f32(vc3, vp, vt);
vp = vmlaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vmlaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 6,160 | 35.672619 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-lut16-p3-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.minus_ln2);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
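  // Editor note (inferred from the code below): this variant pairs a 16-entry exp2
  // table with a degree-3 polynomial. n encodes round(z / ln(2)) at 1/16 granularity:
  // its low 4 bits index xnn_table_exp2minus_k_over_16, the remaining bits are shifted
  // into the float exponent field, and the combined bit pattern gives s. With t the
  // reduced argument, expm1(z) ~= (s - 1) + s * (t + t * p(t)), then the result is
  // selected against beta*x by the sign of x.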
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
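    // Table gather: each 64-bit lane of vidx packs two byte offsets (indices were
    // pre-scaled by << 2), so entries are loaded pairwise and recombined into vectors.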
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 10,488 | 48.014019 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-lut16-p3-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.minus_ln2);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
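    // Adding ven (n << 19) to the table entries drops the integer part of the scaled
    // exponent into the float exponent field; reinterpreting the sum yields vs.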
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc2, vc3, vtCDEF);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 12,249 | 50.25523 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-lut16-p3-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.minus_ln2);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc2, vc3, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc2, vc3, vtGHIJ);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vsGHIJ = vsubq_f32(vsGHIJ, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
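    // Select per lane: vm marks x < 0, which takes alpha*expm1(z); the sign mask is
    // captured before x is scaled in place by beta for the non-negative path.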
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
vst1q_f32(output, vyGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 14,010 | 52.07197 | 127 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-lut16-p3-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.minus_ln2);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
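  // Editor note: this widest variant consumes 24 floats (six NEON vectors) per
  // main-loop iteration; shorter batches fall through to the 4-wide loop and the
  // partial-store tail.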
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
const float32x4_t vzKLMN = vmaxq_f32(vmulq_f32(vxKLMN, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vlog2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask), 2));
const int32x4_t ven89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 19);
const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask), 2));
const int32x4_t venCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 19);
const uint64x2_t vidxGHIJ = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnGHIJ), vindex_mask), 2));
const int32x4_t venGHIJ = vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 19);
const uint64x2_t vidxKLMN = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vnKLMN), vindex_mask), 2));
const int32x4_t venKLMN = vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 19);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
int32x2_t vl89 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx89));
int32x2_t vlAB = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxAB));
vl89 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx89 >> 32)), vl89, 1);
vlAB = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxAB >> 32)), vlAB, 1);
const int32x4_t vl89AB = vcombine_s32(vl89, vlAB);
const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
int32x2_t vlCD = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxCD));
int32x2_t vlEF = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxEF));
vlCD = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxCD >> 32)), vlCD, 1);
vlEF = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxEF >> 32)), vlEF, 1);
const int32x4_t vlCDEF = vcombine_s32(vlCD, vlEF);
const uint64_t vidxGH = vgetq_lane_u64(vidxGHIJ, 0);
const uint64_t vidxIJ = vgetq_lane_u64(vidxGHIJ, 1);
int32x2_t vlGH = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxGH));
int32x2_t vlIJ = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxIJ));
vlGH = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxGH >> 32)), vlGH, 1);
vlIJ = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxIJ >> 32)), vlIJ, 1);
const int32x4_t vlGHIJ = vcombine_s32(vlGH, vlIJ);
const uint64_t vidxKL = vgetq_lane_u64(vidxKLMN, 0);
const uint64_t vidxMN = vgetq_lane_u64(vidxKLMN, 1);
int32x2_t vlKL = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxKL));
int32x2_t vlMN = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxMN));
vlKL = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxKL >> 32)), vlKL, 1);
vlMN = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidxMN >> 32)), vlMN, 1);
const int32x4_t vlKLMN = vcombine_s32(vlKL, vlMN);
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vl89AB, ven89AB));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vlCDEF, venCDEF));
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vsGHIJ = vreinterpretq_f32_s32(vaddq_s32(vlGHIJ, venGHIJ));
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vsKLMN = vreinterpretq_f32_s32(vaddq_s32(vlKLMN, venKLMN));
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vminus_ln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vminus_ln2);
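    // Polynomial step: p = t*(c2 + c3*t); then p*(t*s) + t*s = s*t*(1 + c2*t + c3*t^2),
    // so adding (s - 1) below gives s*(1 + t + c2*t^2 + c3*t^3) - 1 ~= exp(z) - 1,
    // which is finally scaled by alpha.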
float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc2, vc3, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc2, vc3, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc2, vc3, vtGHIJ);
float32x4_t vpKLMN = vfmaq_f32(vc2, vc3, vtKLMN);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
vpKLMN = vmulq_f32(vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vsGHIJ = vsubq_f32(vsGHIJ, vone);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
vsKLMN = vsubq_f32(vsKLMN, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vtKLMN, vpKLMN, vtKLMN);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
const float32x4_t veKLMN = vmulq_f32(vaddq_f32(vpKLMN, vsKLMN), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vxKLMN = vmulq_f32(vxKLMN, vbeta);
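    // Select per lane: alpha*(exp(z) - 1) where the original x was negative, beta*x otherwise.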
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
const float32x4_t vyKLMN = vbslq_f32(vmKLMN, veKLMN, vxKLMN);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
vst1q_f32(output, vyGHIJ); output += 4;
vst1q_f32(output, vyKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
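  // Tail of 1-3 elements: a full 4-lane vector is still loaded (these kernels are
  // annotated XNN_OOB_READS, see the sibling variants below), and only the valid
  // lanes are written out with vst1_f32/vst1_lane_f32.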
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 15,771 | 53.574394 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-lut16-p3-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.minus_ln2);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
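  // Single-vector variant: the main loop handles exactly 4 floats per iteration; the
  // remainder path below repeats the computation on an over-read vector and stores
  // only the 2 and/or 1 valid lanes.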
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 5,117 | 40.609756 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-lut16-p3-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const int32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.log2e);
const int32x4_t vindex_mask = vmovq_n_s32(0xF);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.minus_ln2);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_lut16_p3.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
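  // 2x unrolled main loop (8 floats per iteration); any leftover group of 4 falls
  // through to the single-vector loop, and the final 1-3 elements to the partial-store tail.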
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask), 2));
const int32x4_t ven0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 19);
const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask), 2));
const int32x4_t ven4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 19);
const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
int32x2_t vl01 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx01));
int32x2_t vl23 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx23));
vl01 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx01 >> 32)), vl01, 1);
vl23 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx23 >> 32)), vl23, 1);
const int32x4_t vl0123 = vcombine_s32(vl01, vl23);
const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
int32x2_t vl45 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx45));
int32x2_t vl67 = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx67));
vl45 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx45 >> 32)), vl45, 1);
vl67 = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx67 >> 32)), vl67, 1);
const int32x4_t vl4567 = vcombine_s32(vl45, vl67);
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vl0123, ven0123));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vl4567, ven4567));
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc2, vc3, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc2, vc3, vt4567);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
const uint64x2_t vidx = vreinterpretq_u64_s32(vshlq_n_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask), 2));
const int32x4_t ven = vshlq_n_s32(vreinterpretq_s32_f32(vn), 19);
const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
int32x2_t vl_lo = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
int32x2_t vl_hi = vld1_dup_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
vl_lo = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)), vl_lo, 1);
vl_hi = vld1_lane_s32((const int32_t*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)), vl_hi, 1);
vn = vsubq_f32(vn, vmagic_bias);
const int32x4_t vl = vcombine_s32(vl_lo, vl_hi);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vl, ven));
float32x4_t vp = vfmaq_f32(vc2, vc3, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 8,724 | 45.164021 | 127 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-p6-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neonfma_rr1_p6_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_p6.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_p6.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_p6.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p6.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p6.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p6.log2e);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.minus_ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
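  // p6 variant: unlike the lut16 kernels, no lookup table is used. s = 2^n is formed by
  // shifting the integer bits of (n + magic_bias) straight into the float exponent field
  // (vshlq_n_s32 by 23), and the reduced-range exponential is approximated by a
  // degree-6 polynomial with coefficients c2..c6.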
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 6,960 | 37.458564 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-p6-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neonfma_rr1_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_p6.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_p6.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_p6.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p6.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p6.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p6.log2e);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.minus_ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 8,019 | 39.1 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-p6-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neonfma_rr1_p6_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_p6.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_p6.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_p6.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p6.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p6.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p6.log2e);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.minus_ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vlog2e);
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc5, vc6, vtGHIJ);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc4, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vsGHIJ = vsubq_f32(vsGHIJ, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
vst1q_f32(output, vyGHIJ); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 9,078 | 40.456621 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-p6-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neonfma_rr1_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_p6.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_p6.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_p6.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p6.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p6.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p6.log2e);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.minus_ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
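  // Widest p6 variant: six vectors (24 floats) are kept in flight per iteration.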
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vxGHIJ = vld1q_f32(input); input += 4;
float32x4_t vxKLMN = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
const float32x4_t vz89AB = vmaxq_f32(vmulq_f32(vx89AB, vprescale), vsat_cutoff);
const float32x4_t vzCDEF = vmaxq_f32(vmulq_f32(vxCDEF, vprescale), vsat_cutoff);
const float32x4_t vzGHIJ = vmaxq_f32(vmulq_f32(vxGHIJ, vprescale), vsat_cutoff);
const float32x4_t vzKLMN = vmaxq_f32(vmulq_f32(vxKLMN, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vlog2e);
float32x4_t vnCDEF = vfmaq_f32(vmagic_bias, vzCDEF, vlog2e);
float32x4_t vnGHIJ = vfmaq_f32(vmagic_bias, vzGHIJ, vlog2e);
float32x4_t vnKLMN = vfmaq_f32(vmagic_bias, vzKLMN, vlog2e);
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vs89AB = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 23));
vn89AB = vsubq_f32(vn89AB, vmagic_bias);
float32x4_t vsCDEF = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 23));
vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);
float32x4_t vsGHIJ = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnGHIJ), 23));
vnGHIJ = vsubq_f32(vnGHIJ, vmagic_bias);
float32x4_t vsKLMN = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vnKLMN), 23));
vnKLMN = vsubq_f32(vnKLMN, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vminus_ln2);
float32x4_t vtCDEF = vfmaq_f32(vzCDEF, vnCDEF, vminus_ln2);
float32x4_t vtGHIJ = vfmaq_f32(vzGHIJ, vnGHIJ, vminus_ln2);
float32x4_t vtKLMN = vfmaq_f32(vzKLMN, vnKLMN, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
float32x4_t vp89AB = vfmaq_f32(vc5, vc6, vt89AB);
float32x4_t vpCDEF = vfmaq_f32(vc5, vc6, vtCDEF);
float32x4_t vpGHIJ = vfmaq_f32(vc5, vc6, vtGHIJ);
float32x4_t vpKLMN = vfmaq_f32(vc5, vc6, vtKLMN);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp89AB = vfmaq_f32(vc4, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc4, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc4, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc4, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp89AB = vfmaq_f32(vc3, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc3, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc3, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc3, vpKLMN, vtKLMN);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp89AB = vfmaq_f32(vc2, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vc2, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vc2, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vc2, vpKLMN, vtKLMN);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vp89AB = vmulq_f32(vp89AB, vt89AB);
vpCDEF = vmulq_f32(vpCDEF, vtCDEF);
vpGHIJ = vmulq_f32(vpGHIJ, vtGHIJ);
vpKLMN = vmulq_f32(vpKLMN, vtKLMN);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vt89AB = vmulq_f32(vt89AB, vs89AB);
vs89AB = vsubq_f32(vs89AB, vone);
vtCDEF = vmulq_f32(vtCDEF, vsCDEF);
vsCDEF = vsubq_f32(vsCDEF, vone);
vtGHIJ = vmulq_f32(vtGHIJ, vsGHIJ);
vsGHIJ = vsubq_f32(vsGHIJ, vone);
vtKLMN = vmulq_f32(vtKLMN, vsKLMN);
vsKLMN = vsubq_f32(vsKLMN, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
vp89AB = vfmaq_f32(vt89AB, vp89AB, vt89AB);
vpCDEF = vfmaq_f32(vtCDEF, vpCDEF, vtCDEF);
vpGHIJ = vfmaq_f32(vtGHIJ, vpGHIJ, vtGHIJ);
vpKLMN = vfmaq_f32(vtKLMN, vpKLMN, vtKLMN);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const float32x4_t ve89AB = vmulq_f32(vaddq_f32(vp89AB, vs89AB), valpha);
const float32x4_t veCDEF = vmulq_f32(vaddq_f32(vpCDEF, vsCDEF), valpha);
const float32x4_t veGHIJ = vmulq_f32(vaddq_f32(vpGHIJ, vsGHIJ), valpha);
const float32x4_t veKLMN = vmulq_f32(vaddq_f32(vpKLMN, vsKLMN), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
vx89AB = vmulq_f32(vx89AB, vbeta);
const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));
vxCDEF = vmulq_f32(vxCDEF, vbeta);
const uint32x4_t vmGHIJ = vcltq_f32(vxGHIJ, vmovq_n_f32(0.0f));
vxGHIJ = vmulq_f32(vxGHIJ, vbeta);
const uint32x4_t vmKLMN = vcltq_f32(vxKLMN, vmovq_n_f32(0.0f));
vxKLMN = vmulq_f32(vxKLMN, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
const float32x4_t vy89AB = vbslq_f32(vm89AB, ve89AB, vx89AB);
const float32x4_t vyCDEF = vbslq_f32(vmCDEF, veCDEF, vxCDEF);
const float32x4_t vyGHIJ = vbslq_f32(vmGHIJ, veGHIJ, vxGHIJ);
const float32x4_t vyKLMN = vbslq_f32(vmKLMN, veKLMN, vxKLMN);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
vst1q_f32(output, vy89AB); output += 4;
vst1q_f32(output, vyCDEF); output += 4;
vst1q_f32(output, vyGHIJ); output += 4;
vst1q_f32(output, vyKLMN); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 10,137 | 41.596639 | 95 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-p6-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neonfma_rr1_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_p6.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_p6.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_p6.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p6.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p6.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p6.log2e);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.minus_ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
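  // Narrowest p6 variant: one float32x4_t per iteration, no unrolling; only the 1-3
  // element tail below differs from the main loop (partial stores).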
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
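    // 1-3 leftover elements: a full 4-element vector is loaded (permitted because the
    // kernel is declared XNN_OOB_READS) and only the valid lanes are stored below.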
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 3,694 | 33.53271 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-neonfma-rr1-p6-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/neon-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__neonfma_rr1_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vprescale = vld1q_dup_f32(¶ms->neonfma_rr1_p6.prescale);
const float32x4_t valpha = vld1q_dup_f32(¶ms->neonfma_rr1_p6.alpha);
const float32x4_t vbeta = vld1q_dup_f32(¶ms->neonfma_rr1_p6.beta);
const float32x4_t vsat_cutoff = vld1q_dup_f32(¶ms->neonfma_rr1_p6.sat_cutoff);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->neonfma_rr1_p6.magic_bias);
const float32x4_t vlog2e = vld1q_dup_f32(¶ms->neonfma_rr1_p6.log2e);
const float32x4_t vminus_ln2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.minus_ln2);
const float32x4_t vc6 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c6);
const float32x4_t vc5 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c5);
const float32x4_t vc4 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c4);
const float32x4_t vc3 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c3);
const float32x4_t vc2 = vld1q_dup_f32(¶ms->neonfma_rr1_p6.c2);
const float32x4_t vone = vmovq_n_f32(1.0f);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
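    // Two 4-float vectors per iteration, interleaved to expose more instruction-level
    // parallelism; the 4-element loop and the partial-store tail below handle leftovers.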
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
const float32x4_t vz0123 = vmaxq_f32(vmulq_f32(vx0123, vprescale), vsat_cutoff);
const float32x4_t vz4567 = vmaxq_f32(vmulq_f32(vx4567, vprescale), vsat_cutoff);
float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vlog2e);
float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vlog2e);
float32x4_t vs0123 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 23));
vn0123 = vsubq_f32(vn0123, vmagic_bias);
float32x4_t vs4567 = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 23));
vn4567 = vsubq_f32(vn4567, vmagic_bias);
float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vminus_ln2);
float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vminus_ln2);
float32x4_t vp0123 = vfmaq_f32(vc5, vc6, vt0123);
float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);
vp0123 = vfmaq_f32(vc4, vp0123, vt0123);
vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
vp0123 = vfmaq_f32(vc3, vp0123, vt0123);
vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
vp0123 = vfmaq_f32(vc2, vp0123, vt0123);
vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
vp0123 = vmulq_f32(vp0123, vt0123);
vp4567 = vmulq_f32(vp4567, vt4567);
vt0123 = vmulq_f32(vt0123, vs0123);
vs0123 = vsubq_f32(vs0123, vone);
vt4567 = vmulq_f32(vt4567, vs4567);
vs4567 = vsubq_f32(vs4567, vone);
vp0123 = vfmaq_f32(vt0123, vp0123, vt0123);
vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
const float32x4_t ve0123 = vmulq_f32(vaddq_f32(vp0123, vs0123), valpha);
const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);
const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
vx0123 = vmulq_f32(vx0123, vbeta);
const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
vx4567 = vmulq_f32(vx4567, vbeta);
const float32x4_t vy0123 = vbslq_f32(vm0123, ve0123, vx0123);
const float32x4_t vy4567 = vbslq_f32(vm4567, ve4567, vx4567);
vst1q_f32(output, vy0123); output += 4;
vst1q_f32(output, vy4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
vst1q_f32(output, vy); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
const float32x4_t vz = vmaxq_f32(vmulq_f32(vx, vprescale), vsat_cutoff);
float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vlog2e);
float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));
vn = vsubq_f32(vn, vmagic_bias);
float32x4_t vt = vfmaq_f32(vz, vn, vminus_ln2);
float32x4_t vp = vfmaq_f32(vc5, vc6, vt);
vp = vfmaq_f32(vc4, vp, vt);
vp = vfmaq_f32(vc3, vp, vt);
vp = vfmaq_f32(vc2, vp, vt);
vp = vmulq_f32(vp, vt);
vt = vmulq_f32(vt, vs);
vs = vsubq_f32(vs, vone);
vp = vfmaq_f32(vt, vp, vt);
const float32x4_t ve = vmulq_f32(vaddq_f32(vp, vs), valpha);
const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
vx = vmulq_f32(vx, vbeta);
const float32x4_t vy = vbslq_f32(vm, ve, vx);
float32x2_t vy_lo = vget_low_f32(vy);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vy_lo); output += 2;
vy_lo = vget_high_f32(vy);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vy_lo, 0);
}
}
}
| 5,898 | 35.41358 | 95 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-lut16-p3-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
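// 16-entry table of 2**(-k/16) values, k = 0..15, indexed by the low 4 bits of n below.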
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
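  // exp(z) is factored into a power of two and a small correction: the low 4 bits of
  // the magic-biased n select an entry from the 2**(-k/16) table, the remaining bits
  // are shifted into the exponent field (ven), and exp(t) of the reduced argument is
  // approximated by a degree-3 polynomial (c3, c2). -ln2 is applied in two parts
  // (hi/lo) for extra accuracy in the range reduction. Inputs at or below sat_cutoff
  // flush s and t to zero, so the negative branch saturates to -alpha and the
  // out-of-range 2**n reconstruction is discarded.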
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,290 | 27.283951 | 74 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-lut16-p3-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
vt0 = vn0 * vminus_ln2_lo + vt0;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
vt1 = vn1 * vminus_ln2_lo + vt1;
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output = vy;
}
}
| 3,902 | 26.293706 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-lut16-p3-x3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
input += 3;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
const float vz2 = vx2 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
const uint32_t ven2 = float_as_uint32(vn2) << 19;
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
vn2 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
vt0 = vn0 * vminus_ln2_lo + vt0;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
vt1 = vn1 * vminus_ln2_lo + vt1;
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
vt2 = vn2 * vminus_ln2_lo + vt2;
if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
vs2 = 0.0f;
vt2 = 0.0f;
}
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
float vp2 = vc3 * vt2 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = vx2 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
if XNN_UNPREDICTABLE(vx2 < 0.0f) {
vy2 = ve2;
}
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 4,782 | 26.97076 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-lut16-p3-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
const float vz2 = vx2 * vprescale;
const float vz3 = vx3 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
const uint32_t ven2 = float_as_uint32(vn2) << 19;
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
vn2 -= vmagic_bias;
const uint32_t ven3 = float_as_uint32(vn3) << 19;
const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
vn3 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
vt0 = vn0 * vminus_ln2_lo + vt0;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
vt1 = vn1 * vminus_ln2_lo + vt1;
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
vt2 = vn2 * vminus_ln2_lo + vt2;
if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
vs2 = 0.0f;
vt2 = 0.0f;
}
vt3 = vn3 * vminus_ln2_lo + vt3;
if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
vs3 = 0.0f;
vt3 = 0.0f;
}
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
float vp2 = vc3 * vt2 + vc2;
float vp3 = vc3 * vt3 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = vx2 * vbeta;
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = vx3 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
if XNN_UNPREDICTABLE(vx2 < 0.0f) {
vy2 = ve2;
}
if XNN_UNPREDICTABLE(vx3 < 0.0f) {
vy3 = ve3;
}
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,544 | 27.435897 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-lut16-p3-x5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 5 * sizeof(float); batch -= 5 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
float vx4 = input[4];
input += 5;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
const float vz2 = vx2 * vprescale;
const float vz3 = vx3 * vprescale;
const float vz4 = vx4 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vn4 = vz4 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
const uint32_t ven2 = float_as_uint32(vn2) << 19;
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
vn2 -= vmagic_bias;
const uint32_t ven3 = float_as_uint32(vn3) << 19;
const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
vn3 -= vmagic_bias;
const uint32_t ven4 = float_as_uint32(vn4) << 19;
const uint32_t vidx4 = float_as_uint32(vn4) & vindex_mask;
vn4 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
float vt4 = vn4 * vminus_ln2_hi + vz4;
float vs4 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
vt0 = vn0 * vminus_ln2_lo + vt0;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
vt1 = vn1 * vminus_ln2_lo + vt1;
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
vt2 = vn2 * vminus_ln2_lo + vt2;
if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
vs2 = 0.0f;
vt2 = 0.0f;
}
vt3 = vn3 * vminus_ln2_lo + vt3;
if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
vs3 = 0.0f;
vt3 = 0.0f;
}
vt4 = vn4 * vminus_ln2_lo + vt4;
if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
vs4 = 0.0f;
vt4 = 0.0f;
}
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
float vp2 = vc3 * vt2 + vc2;
float vp3 = vc3 * vt3 + vc2;
float vp4 = vc3 * vt4 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vp4 *= vt4;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vt4 *= vs4;
vs4 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
vp4 = vp4 * vt4 + vt4;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = vx2 * vbeta;
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = vx3 * vbeta;
const float ve4 = (vp4 + vs4) * valpha;
float vy4 = vx4 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
if XNN_UNPREDICTABLE(vx2 < 0.0f) {
vy2 = ve2;
}
if XNN_UNPREDICTABLE(vx3 < 0.0f) {
vy3 = ve3;
}
if XNN_UNPREDICTABLE(vx4 < 0.0f) {
vy4 = ve4;
}
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output[4] = vy4;
output += 5;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 6,306 | 27.799087 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-lut16-p3-x6.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x6(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 6 * sizeof(float); batch -= 6 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
float vx4 = input[4];
float vx5 = input[5];
input += 6;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
const float vz2 = vx2 * vprescale;
const float vz3 = vx3 * vprescale;
const float vz4 = vx4 * vprescale;
const float vz5 = vx5 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vn4 = vz4 * vlog2e + vmagic_bias;
float vn5 = vz5 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
const uint32_t ven2 = float_as_uint32(vn2) << 19;
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
vn2 -= vmagic_bias;
const uint32_t ven3 = float_as_uint32(vn3) << 19;
const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
vn3 -= vmagic_bias;
const uint32_t ven4 = float_as_uint32(vn4) << 19;
const uint32_t vidx4 = float_as_uint32(vn4) & vindex_mask;
vn4 -= vmagic_bias;
const uint32_t ven5 = float_as_uint32(vn5) << 19;
const uint32_t vidx5 = float_as_uint32(vn5) & vindex_mask;
vn5 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
float vt4 = vn4 * vminus_ln2_hi + vz4;
float vs4 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
float vt5 = vn5 * vminus_ln2_hi + vz5;
float vs5 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx5] + ven5);
vt0 = vn0 * vminus_ln2_lo + vt0;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
vt1 = vn1 * vminus_ln2_lo + vt1;
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
vt2 = vn2 * vminus_ln2_lo + vt2;
if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
vs2 = 0.0f;
vt2 = 0.0f;
}
vt3 = vn3 * vminus_ln2_lo + vt3;
if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
vs3 = 0.0f;
vt3 = 0.0f;
}
vt4 = vn4 * vminus_ln2_lo + vt4;
if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
vs4 = 0.0f;
vt4 = 0.0f;
}
vt5 = vn5 * vminus_ln2_lo + vt5;
if XNN_UNPREDICTABLE(vz5 <= vsat_cutoff) {
vs5 = 0.0f;
vt5 = 0.0f;
}
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
float vp2 = vc3 * vt2 + vc2;
float vp3 = vc3 * vt3 + vc2;
float vp4 = vc3 * vt4 + vc2;
float vp5 = vc3 * vt5 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vp4 *= vt4;
vp5 *= vt5;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vt4 *= vs4;
vs4 -= vone;
vt5 *= vs5;
vs5 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
vp4 = vp4 * vt4 + vt4;
vp5 = vp5 * vt5 + vt5;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = vx2 * vbeta;
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = vx3 * vbeta;
const float ve4 = (vp4 + vs4) * valpha;
float vy4 = vx4 * vbeta;
const float ve5 = (vp5 + vs5) * valpha;
float vy5 = vx5 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
if XNN_UNPREDICTABLE(vx2 < 0.0f) {
vy2 = ve2;
}
if XNN_UNPREDICTABLE(vx3 < 0.0f) {
vy3 = ve3;
}
if XNN_UNPREDICTABLE(vx4 < 0.0f) {
vy4 = ve4;
}
if XNN_UNPREDICTABLE(vx5 < 0.0f) {
vy5 = ve5;
}
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output[4] = vy4;
output[5] = vy5;
output += 6;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 7,068 | 28.090535 | 77 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-p6-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__scalar_rr2_p6_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
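  // Table-free variant: 2**n is formed directly by shifting the magic-biased n into
  // the exponent field (<< 23), and exp(t) is approximated by a degree-6 polynomial
  // (c6..c2 plus the exact 1 + t terms). -ln2 is again split into hi/lo parts for the
  // range reduction, and inputs at or below sat_cutoff saturate to -alpha.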
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,176 | 25.54878 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-p6-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__scalar_rr2_p6_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output = vy;
}
}
| 3,686 | 23.58 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-p6-x3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__scalar_rr2_p6_x3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
input += 3;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
const float vz2 = vx2 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
vn2 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vt2 = vn2 * vminus_ln2_hi + vz2;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
vs2 = 0.0f;
vt2 = 0.0f;
}
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = vx2 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
if XNN_UNPREDICTABLE(vx2 < 0.0f) {
vy2 = ve2;
}
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 4,515 | 24.22905 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-p6-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__scalar_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
const float vz2 = vx2 * vprescale;
const float vz3 = vx3 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
vn2 -= vmagic_bias;
float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
vn3 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vt3 = vn3 * vminus_ln2_hi + vz3;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
vs2 = 0.0f;
vt2 = 0.0f;
}
if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
vs3 = 0.0f;
vt3 = 0.0f;
}
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
float vp3 = vc6 * vt3 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp3 = vp3 * vt3 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp3 = vp3 * vt3 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = vx2 * vbeta;
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = vx3 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
if XNN_UNPREDICTABLE(vx2 < 0.0f) {
vy2 = ve2;
}
if XNN_UNPREDICTABLE(vx3 < 0.0f) {
vy3 = ve3;
}
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,224 | 24.612745 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-p6-x5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__scalar_rr2_p6_x5(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 5 * sizeof(float); batch -= 5 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
float vx4 = input[4];
input += 5;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
const float vz2 = vx2 * vprescale;
const float vz3 = vx3 * vprescale;
const float vz4 = vx4 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vn4 = vz4 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
vn2 -= vmagic_bias;
float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
vn3 -= vmagic_bias;
float vs4 = uint32_as_float(float_as_uint32(vn4) << 23);
vn4 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vt4 = vn4 * vminus_ln2_hi + vz4;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
vt4 = vn4 * vminus_ln2_lo + vt4;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
vs2 = 0.0f;
vt2 = 0.0f;
}
if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
vs3 = 0.0f;
vt3 = 0.0f;
}
if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
vs4 = 0.0f;
vt4 = 0.0f;
}
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
float vp3 = vc6 * vt3 + vc5;
float vp4 = vc6 * vt4 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp3 = vp3 * vt3 + vc4;
vp4 = vp4 * vt4 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp3 = vp3 * vt3 + vc3;
vp4 = vp4 * vt4 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp4 = vp4 * vt4 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vp4 *= vt4;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vt4 *= vs4;
vs4 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
vp4 = vp4 * vt4 + vt4;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = vx2 * vbeta;
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = vx3 * vbeta;
const float ve4 = (vp4 + vs4) * valpha;
float vy4 = vx4 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
if XNN_UNPREDICTABLE(vx2 < 0.0f) {
vy2 = ve2;
}
if XNN_UNPREDICTABLE(vx3 < 0.0f) {
vy3 = ve3;
}
if XNN_UNPREDICTABLE(vx4 < 0.0f) {
vy4 = ve4;
}
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output[4] = vy4;
output += 5;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,933 | 24.912664 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-scalar-rr2-p6-x6.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__scalar_rr2_p6_x6(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 6 * sizeof(float); batch -= 6 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
float vx4 = input[4];
float vx5 = input[5];
input += 6;
const float vz0 = vx0 * vprescale;
const float vz1 = vx1 * vprescale;
const float vz2 = vx2 * vprescale;
const float vz3 = vx3 * vprescale;
const float vz4 = vx4 * vprescale;
const float vz5 = vx5 * vprescale;
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vn4 = vz4 * vlog2e + vmagic_bias;
float vn5 = vz5 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
vn2 -= vmagic_bias;
float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
vn3 -= vmagic_bias;
float vs4 = uint32_as_float(float_as_uint32(vn4) << 23);
vn4 -= vmagic_bias;
float vs5 = uint32_as_float(float_as_uint32(vn5) << 23);
vn5 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vt4 = vn4 * vminus_ln2_hi + vz4;
float vt5 = vn5 * vminus_ln2_hi + vz5;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
vt4 = vn4 * vminus_ln2_lo + vt4;
vt5 = vn5 * vminus_ln2_lo + vt5;
if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
vs0 = 0.0f;
vt0 = 0.0f;
}
if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
vs1 = 0.0f;
vt1 = 0.0f;
}
if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
vs2 = 0.0f;
vt2 = 0.0f;
}
if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
vs3 = 0.0f;
vt3 = 0.0f;
}
if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
vs4 = 0.0f;
vt4 = 0.0f;
}
if XNN_UNPREDICTABLE(vz5 <= vsat_cutoff) {
vs5 = 0.0f;
vt5 = 0.0f;
}
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
float vp3 = vc6 * vt3 + vc5;
float vp4 = vc6 * vt4 + vc5;
float vp5 = vc6 * vt5 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp3 = vp3 * vt3 + vc4;
vp4 = vp4 * vt4 + vc4;
vp5 = vp5 * vt5 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp3 = vp3 * vt3 + vc3;
vp4 = vp4 * vt4 + vc3;
vp5 = vp5 * vt5 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp4 = vp4 * vt4 + vc2;
vp5 = vp5 * vt5 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vp4 *= vt4;
vp5 *= vt5;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vt4 *= vs4;
vs4 -= vone;
vt5 *= vs5;
vs5 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
vp4 = vp4 * vt4 + vt4;
vp5 = vp5 * vt5 + vt5;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = vx0 * vbeta;
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = vx1 * vbeta;
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = vx2 * vbeta;
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = vx3 * vbeta;
const float ve4 = (vp4 + vs4) * valpha;
float vy4 = vx4 * vbeta;
const float ve5 = (vp5 + vs5) * valpha;
float vy5 = vx5 * vbeta;
if XNN_UNPREDICTABLE(vx0 < 0.0f) {
vy0 = ve0;
}
if XNN_UNPREDICTABLE(vx1 < 0.0f) {
vy1 = ve1;
}
if XNN_UNPREDICTABLE(vx2 < 0.0f) {
vy2 = ve2;
}
if XNN_UNPREDICTABLE(vx3 < 0.0f) {
vy3 = ve3;
}
if XNN_UNPREDICTABLE(vx4 < 0.0f) {
vy4 = ve4;
}
if XNN_UNPREDICTABLE(vx5 < 0.0f) {
vy5 = ve5;
}
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output[4] = vy4;
output[5] = vy5;
output += 6;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = vx * vprescale;
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
vs = 0.0f;
vt = 0.0f;
}
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = vx * vbeta;
if XNN_UNPREDICTABLE(vx < 0.0f) {
vy = ve;
}
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 6,642 | 25.153543 | 72 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse2-rr2-p6-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse2_rr2_p6_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
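  // Same rr2/p6 ELU evaluation as the scalar kernels, vectorized with SSE2:
  // a 12-element main loop (three 4-wide vectors), a 4-element loop, and a tail
  // that reads a full vector (hence XNN_OOB_READS) but stores only the valid lanes.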
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
__m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vp89AB = _mm_mul_ps(vp89AB, vt89AB);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vs89AB = _mm_sub_ps(vs89AB, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
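    // SSE2 has no variable blend: build a per-lane mask from the sign bit
    // (the integer compare flags lanes whose sign bit is set, i.e. negative x)
    // and select between the ELU branch 'e' and beta*x with and/andnot/or.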
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
vx0123 = _mm_mul_ps(vx0123, vbeta);
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
vx4567 = _mm_mul_ps(vx4567, vbeta);
const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
vx89AB = _mm_mul_ps(vx89AB, vbeta);
const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 7,870 | 39.782383 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse2-rr2-p6-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse2_rr2_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
__m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
__m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vp89AB = _mm_mul_ps(vp89AB, vt89AB);
vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vs89AB = _mm_sub_ps(vs89AB, vone);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vsCDEF = _mm_sub_ps(vsCDEF, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
vx0123 = _mm_mul_ps(vx0123, vbeta);
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
vx4567 = _mm_mul_ps(vx4567, vbeta);
const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
vx89AB = _mm_mul_ps(vx89AB, vbeta);
const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
_mm_storeu_ps(output, vy);
output += 4;
}
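  // 1-3 leftover elements: load a full vector (the kernel is declared XNN_OOB_READS),
  // compute as usual, then store only the valid lanes with a 64-bit low store
  // and/or a single-element store.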
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 9,120 | 41.821596 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse2-rr2-p6-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse2_rr2_p6_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
__m128 vxGHIJ = _mm_loadu_ps(input + 16);
input += 20;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
__m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
__m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
__m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
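    // Two-step Cody-Waite style reduction (the 'rr2' in the kernel name):
    // t = z - n*ln(2), accumulated with ln(2) split into high and low parts
    // to limit rounding error.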
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vp89AB = _mm_mul_ps(vp89AB, vt89AB);
vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vs89AB = _mm_sub_ps(vs89AB, vone);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vsCDEF = _mm_sub_ps(vsCDEF, vone);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
vx0123 = _mm_mul_ps(vx0123, vbeta);
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
vx4567 = _mm_mul_ps(vx4567, vbeta);
const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
vx89AB = _mm_mul_ps(vx89AB, vbeta);
const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
const __m128 vmGHIJ = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxGHIJ)));
vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
const __m128 vyGHIJ = _mm_or_ps(_mm_and_ps(veGHIJ, vmGHIJ), _mm_andnot_ps(vmGHIJ, vxGHIJ));
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 10,370 | 43.51073 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse2-rr2-p6-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse2_rr2_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
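  // x24 variant: identical math, unrolled to six 128-bit vectors per main-loop
  // iteration; the 4-element loop and the tail are the same as in the narrower variants.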
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
__m128 vxGHIJ = _mm_loadu_ps(input + 16);
__m128 vxKLMN = _mm_loadu_ps(input + 20);
input += 24;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
const __m128 vzKLMN = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxKLMN, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
__m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
__m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
__m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
__m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
__m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
__m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc6, vtKLMN), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vp89AB = _mm_mul_ps(vp89AB, vt89AB);
vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
vpKLMN = _mm_mul_ps(vpKLMN, vtKLMN);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vs89AB = _mm_sub_ps(vs89AB, vone);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vsCDEF = _mm_sub_ps(vsCDEF, vone);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
vsKLMN = _mm_sub_ps(vsKLMN, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vtKLMN);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
const __m128 veKLMN = _mm_mul_ps(_mm_add_ps(vpKLMN, vsKLMN), valpha);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
vx0123 = _mm_mul_ps(vx0123, vbeta);
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
vx4567 = _mm_mul_ps(vx4567, vbeta);
const __m128 vm89AB = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx89AB)));
vx89AB = _mm_mul_ps(vx89AB, vbeta);
const __m128 vmCDEF = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxCDEF)));
vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
const __m128 vmGHIJ = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxGHIJ)));
vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
const __m128 vmKLMN = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vxKLMN)));
vxKLMN = _mm_mul_ps(vxKLMN, vbeta);
const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
const __m128 vy89AB = _mm_or_ps(_mm_and_ps(ve89AB, vm89AB), _mm_andnot_ps(vm89AB, vx89AB));
const __m128 vyCDEF = _mm_or_ps(_mm_and_ps(veCDEF, vmCDEF), _mm_andnot_ps(vmCDEF, vxCDEF));
const __m128 vyGHIJ = _mm_or_ps(_mm_and_ps(veGHIJ, vmGHIJ), _mm_andnot_ps(vmGHIJ, vxGHIJ));
const __m128 vyKLMN = _mm_or_ps(_mm_and_ps(veKLMN, vmKLMN), _mm_andnot_ps(vmKLMN, vxKLMN));
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 11,620 | 44.932806 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse2-rr2-p6-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse2_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
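    // Reconstruct the negative branch: the next lines compute
    // p*(t*s) + t*s + (s - 1) = s*(1 + t + p*t) - 1, which approximates exp(z) - 1.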
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 4,008 | 34.794643 | 99 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse2-rr2-p6-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse2_rr2_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
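    // z = x*prescale is clamped at sat_cutoff so the shift-based 2^n reconstruction
    // stays valid; clamped lanes leave the negative branch at approximately -alpha.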
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 vm0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
vx0123 = _mm_mul_ps(vx0123, vbeta);
const __m128 vm4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
vx4567 = _mm_mul_ps(vx4567, vbeta);
const __m128 vy0123 = _mm_or_ps(_mm_and_ps(ve0123, vm0123), _mm_andnot_ps(vm0123, vx0123));
const __m128 vy4567 = _mm_or_ps(_mm_and_ps(ve4567, vm4567), _mm_andnot_ps(vm4567, vx4567));
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
const __m128 vm = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_or_ps(_mm_and_ps(ve, vm), _mm_andnot_ps(vm, vx));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 6,617 | 37.254335 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse41-rr2-p6-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse41_rr2_p6_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
input += 12;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
__m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vp89AB = _mm_mul_ps(vp89AB, vt89AB);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vs89AB = _mm_sub_ps(vs89AB, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
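    // SSE4.1 path: _mm_blendv_ps selects per lane on the sign bit of its third
    // operand, so the beta-scaled x itself acts as the selector (its sign matches
    // x for the usual positive beta); negative lanes take 'e', the rest take beta*x.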
vx0123 = _mm_mul_ps(vx0123, vbeta);
vx4567 = _mm_mul_ps(vx4567, vbeta);
vx89AB = _mm_mul_ps(vx89AB, vbeta);
const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_blendv_ps(vx, ve, vx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_blendv_ps(vx, ve, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 7,200 | 37.303191 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse41-rr2-p6-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse41_rr2_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
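  // Overview of the computation in the loops below:
  //   z = max(sat_cutoff, prescale * x)      clamp very negative inputs (ELU saturates near -alpha there)
  //   n = round(z * log2e)                   magic-bias rounding; s = 2**n is rebuilt by shifting
  //                                          the integer bits of vn into the float exponent field
  //   t = z - n * ln2                        two-step (hi/lo) reduction
  //   expm1(z) ~= (p + 1) * t * s + (s - 1)  where (p + 1) * t is a degree-6 polynomial in t
  //   y = beta * x for x >= 0, alpha * expm1(z) otherwise, selected per lane with
  //   _mm_blendv_ps on the sign bit of beta * x.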
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
input += 16;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
__m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
__m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vp89AB = _mm_mul_ps(vp89AB, vt89AB);
vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vs89AB = _mm_sub_ps(vs89AB, vone);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vsCDEF = _mm_sub_ps(vsCDEF, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
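    // At this point vp holds (p + 1) * t * s and vs holds s - 1, so the next lines form
    // e = alpha * ((p + 1) * t * s + (s - 1)) ~= alpha * expm1(z).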
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
vx0123 = _mm_mul_ps(vx0123, vbeta);
vx4567 = _mm_mul_ps(vx4567, vbeta);
vx89AB = _mm_mul_ps(vx89AB, vbeta);
vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
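  // Remainder: process 4 elements at a time with the same computation as above.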
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_blendv_ps(vx, ve, vx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_blendv_ps(vx, ve, vx);
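    // Store only the 1-3 valid lanes: the low pair first (if present), then a single lane.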
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 8,311 | 39.154589 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse41-rr2-p6-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse41_rr2_p6_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
__m128 vxGHIJ = _mm_loadu_ps(input + 16);
input += 20;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
__m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
__m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
__m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vp89AB = _mm_mul_ps(vp89AB, vt89AB);
vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vs89AB = _mm_sub_ps(vs89AB, vone);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vsCDEF = _mm_sub_ps(vsCDEF, vone);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
vx0123 = _mm_mul_ps(vx0123, vbeta);
vx4567 = _mm_mul_ps(vx4567, vbeta);
vx89AB = _mm_mul_ps(vx89AB, vbeta);
vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
const __m128 vyGHIJ = _mm_blendv_ps(vxGHIJ, veGHIJ, vxGHIJ);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_blendv_ps(vx, ve, vx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_blendv_ps(vx, ve, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 9,422 | 40.69469 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse41-rr2-p6-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse41_rr2_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
__m128 vx89AB = _mm_loadu_ps(input + 8);
__m128 vxCDEF = _mm_loadu_ps(input + 12);
__m128 vxGHIJ = _mm_loadu_ps(input + 16);
__m128 vxKLMN = _mm_loadu_ps(input + 20);
input += 24;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
const __m128 vz89AB = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx89AB, vprescale));
const __m128 vzCDEF = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxCDEF, vprescale));
const __m128 vzGHIJ = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxGHIJ, vprescale));
const __m128 vzKLMN = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vxKLMN, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vn89AB = _mm_add_ps(_mm_mul_ps(vz89AB, vlog2e), vmagic_bias);
__m128 vnCDEF = _mm_add_ps(_mm_mul_ps(vzCDEF, vlog2e), vmagic_bias);
__m128 vnGHIJ = _mm_add_ps(_mm_mul_ps(vzGHIJ, vlog2e), vmagic_bias);
__m128 vnKLMN = _mm_add_ps(_mm_mul_ps(vzKLMN, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
__m128 vs89AB = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn89AB), 23));
__m128 vsCDEF = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnCDEF), 23));
__m128 vsGHIJ = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnGHIJ), 23));
__m128 vsKLMN = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vnKLMN), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
vn89AB = _mm_sub_ps(vn89AB, vmagic_bias);
vnCDEF = _mm_sub_ps(vnCDEF, vmagic_bias);
vnGHIJ = _mm_sub_ps(vnGHIJ, vmagic_bias);
vnKLMN = _mm_sub_ps(vnKLMN, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
__m128 vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_hi), vz89AB);
__m128 vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_hi), vzCDEF);
__m128 vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_hi), vzGHIJ);
__m128 vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_hi), vzKLMN);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
vt89AB = _mm_add_ps(_mm_mul_ps(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = _mm_add_ps(_mm_mul_ps(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = _mm_add_ps(_mm_mul_ps(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = _mm_add_ps(_mm_mul_ps(vnKLMN, vminus_ln2_lo), vtKLMN);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
__m128 vp89AB = _mm_add_ps(_mm_mul_ps(vc6, vt89AB), vc5);
__m128 vpCDEF = _mm_add_ps(_mm_mul_ps(vc6, vtCDEF), vc5);
__m128 vpGHIJ = _mm_add_ps(_mm_mul_ps(vc6, vtGHIJ), vc5);
__m128 vpKLMN = _mm_add_ps(_mm_mul_ps(vc6, vtKLMN), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc4);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc4);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc4);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc3);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc3);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc3);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vc2);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vc2);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vc2);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vp89AB = _mm_mul_ps(vp89AB, vt89AB);
vpCDEF = _mm_mul_ps(vpCDEF, vtCDEF);
vpGHIJ = _mm_mul_ps(vpGHIJ, vtGHIJ);
vpKLMN = _mm_mul_ps(vpKLMN, vtKLMN);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vt89AB = _mm_mul_ps(vt89AB, vs89AB);
vs89AB = _mm_sub_ps(vs89AB, vone);
vtCDEF = _mm_mul_ps(vtCDEF, vsCDEF);
vsCDEF = _mm_sub_ps(vsCDEF, vone);
vtGHIJ = _mm_mul_ps(vtGHIJ, vsGHIJ);
vsGHIJ = _mm_sub_ps(vsGHIJ, vone);
vtKLMN = _mm_mul_ps(vtKLMN, vsKLMN);
vsKLMN = _mm_sub_ps(vsKLMN, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
vp89AB = _mm_add_ps(_mm_mul_ps(vp89AB, vt89AB), vt89AB);
vpCDEF = _mm_add_ps(_mm_mul_ps(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = _mm_add_ps(_mm_mul_ps(vpGHIJ, vtGHIJ), vtGHIJ);
vpKLMN = _mm_add_ps(_mm_mul_ps(vpKLMN, vtKLMN), vtKLMN);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
const __m128 ve89AB = _mm_mul_ps(_mm_add_ps(vp89AB, vs89AB), valpha);
const __m128 veCDEF = _mm_mul_ps(_mm_add_ps(vpCDEF, vsCDEF), valpha);
const __m128 veGHIJ = _mm_mul_ps(_mm_add_ps(vpGHIJ, vsGHIJ), valpha);
const __m128 veKLMN = _mm_mul_ps(_mm_add_ps(vpKLMN, vsKLMN), valpha);
vx0123 = _mm_mul_ps(vx0123, vbeta);
vx4567 = _mm_mul_ps(vx4567, vbeta);
vx89AB = _mm_mul_ps(vx89AB, vbeta);
vxCDEF = _mm_mul_ps(vxCDEF, vbeta);
vxGHIJ = _mm_mul_ps(vxGHIJ, vbeta);
vxKLMN = _mm_mul_ps(vxKLMN, vbeta);
const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
const __m128 vy89AB = _mm_blendv_ps(vx89AB, ve89AB, vx89AB);
const __m128 vyCDEF = _mm_blendv_ps(vxCDEF, veCDEF, vxCDEF);
const __m128 vyGHIJ = _mm_blendv_ps(vxGHIJ, veGHIJ, vxGHIJ);
const __m128 vyKLMN = _mm_blendv_ps(vxKLMN, veKLMN, vxKLMN);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_blendv_ps(vx, ve, vx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_blendv_ps(vx, ve, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 10,533 | 41.995918 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse41-rr2-p6-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse41_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_blendv_ps(vx, ve, vx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
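    // 1-3 elements remain: load a full vector (reading past the end is permitted here,
    // as the kernel is declared XNN_OOB_READS) and store only the valid lanes below.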
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_blendv_ps(vx, ve, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 3,755 | 33.145455 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-sse41-rr2-p6-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/sse-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__sse41_rr2_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vprescale = _mm_load_ps(params->sse2_rr2_p6.prescale);
const __m128 valpha = _mm_load_ps(params->sse2_rr2_p6.alpha);
const __m128 vbeta = _mm_load_ps(params->sse2_rr2_p6.beta);
const __m128 vsat_cutoff = _mm_load_ps(params->sse2_rr2_p6.sat_cutoff);
const __m128 vmagic_bias = _mm_load_ps(params->sse2_rr2_p6.magic_bias);
const __m128 vlog2e = _mm_load_ps(params->sse2_rr2_p6.log2e);
const __m128 vminus_ln2_hi = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_hi);
const __m128 vminus_ln2_lo = _mm_load_ps(params->sse2_rr2_p6.minus_ln2_lo);
const __m128 vc6 = _mm_load_ps(params->sse2_rr2_p6.c6);
const __m128 vc5 = _mm_load_ps(params->sse2_rr2_p6.c5);
const __m128 vc4 = _mm_load_ps(params->sse2_rr2_p6.c4);
const __m128 vc3 = _mm_load_ps(params->sse2_rr2_p6.c3);
const __m128 vc2 = _mm_load_ps(params->sse2_rr2_p6.c2);
const __m128 vone = _mm_load_ps(params->sse2_rr2_p6.one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
const __m128 vz0123 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx0123, vprescale));
const __m128 vz4567 = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx4567, vprescale));
__m128 vn0123 = _mm_add_ps(_mm_mul_ps(vz0123, vlog2e), vmagic_bias);
__m128 vn4567 = _mm_add_ps(_mm_mul_ps(vz4567, vlog2e), vmagic_bias);
__m128 vs0123 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn0123), 23));
__m128 vs4567 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn4567), 23));
vn0123 = _mm_sub_ps(vn0123, vmagic_bias);
vn4567 = _mm_sub_ps(vn4567, vmagic_bias);
__m128 vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_hi), vz0123);
__m128 vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_hi), vz4567);
vt0123 = _mm_add_ps(_mm_mul_ps(vn0123, vminus_ln2_lo), vt0123);
vt4567 = _mm_add_ps(_mm_mul_ps(vn4567, vminus_ln2_lo), vt4567);
__m128 vp0123 = _mm_add_ps(_mm_mul_ps(vc6, vt0123), vc5);
__m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc4);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc3);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vc2);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
vp0123 = _mm_mul_ps(vp0123, vt0123);
vp4567 = _mm_mul_ps(vp4567, vt4567);
vt0123 = _mm_mul_ps(vt0123, vs0123);
vs0123 = _mm_sub_ps(vs0123, vone);
vt4567 = _mm_mul_ps(vt4567, vs4567);
vs4567 = _mm_sub_ps(vs4567, vone);
vp0123 = _mm_add_ps(_mm_mul_ps(vp0123, vt0123), vt0123);
vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
const __m128 ve0123 = _mm_mul_ps(_mm_add_ps(vp0123, vs0123), valpha);
const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);
vx0123 = _mm_mul_ps(vx0123, vbeta);
vx4567 = _mm_mul_ps(vx4567, vbeta);
const __m128 vy0123 = _mm_blendv_ps(vx0123, ve0123, vx0123);
const __m128 vy4567 = _mm_blendv_ps(vx4567, ve4567, vx4567);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
const __m128 vy = _mm_blendv_ps(vx, ve, vx);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
const __m128 vz = _mm_max_ps(vsat_cutoff, _mm_mul_ps(vx, vprescale));
__m128 vn = _mm_add_ps(_mm_mul_ps(vz, vlog2e), vmagic_bias);
__m128 vs = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(vn), 23));
vn = _mm_sub_ps(vn, vmagic_bias);
__m128 vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_hi), vz);
vt = _mm_add_ps(_mm_mul_ps(vn, vminus_ln2_lo), vt);
__m128 vp = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc4);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc3);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vc2);
vp = _mm_mul_ps(vp, vt);
vt = _mm_mul_ps(vt, vs);
vs = _mm_sub_ps(vs, vone);
vp = _mm_add_ps(_mm_mul_ps(vp, vt), vt);
const __m128 ve = _mm_mul_ps(_mm_add_ps(vp, vs), valpha);
vx = _mm_mul_ps(vx, vbeta);
__m128 vy = _mm_blendv_ps(vx, ve, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy);
}
}
}
| 6,086 | 35.017751 | 86 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-lut16-p3-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
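  // Overview of this scalar LUT16 + degree-3 polynomial variant (the constant scaling noted
  // below is an assumption; the rest follows the code directly):
  //   z = clamp(prescale * x, [sat_cutoff, 0])
  //   n = round(z * vlog2e)                  magic-bias rounding; vlog2e and vminus_ln2_hi/lo
  //                                          are presumably pre-scaled by 16
  //   s ~= exp(z - t)                        rebuilt by adding ven (upper bits of n moved into
  //                                          the exponent field) to a 2**(-k/16) table entry
  //   t = z + n * (vminus_ln2_hi + vminus_ln2_lo)   two-step reduction
  //   expm1(z) ~= (p + 1) * t * s + (s - 1)  with p = t * (c2 + c3 * t)
  //   y = max(beta * x, 0) + min(alpha * expm1(z), 0)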
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
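    // Split n: the low 4 bits (vidx) select the table entry, while the bits above them (ven,
    // shifted up to the float exponent field) later adjust the exponent of the scale factor.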
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
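    // Branch-free selection (assuming beta >= 0, as for standard ELU): for x >= 0 the max()
    // keeps beta * x and ve is ~0 since z was clamped to 0; for x < 0 the max() yields 0
    // and the min() contributes the non-positive ve.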
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,284 | 29.466667 | 103 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-lut16-p3-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output = vy;
}
}
| 3,880 | 30.048 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-lut16-p3-x3.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
input += 3;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
const uint32_t ven2 = float_as_uint32(vn2) << 19;
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
vn2 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
float vp2 = vc3 * vt2 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 4,740 | 31.251701 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-lut16-p3-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
const uint32_t ven2 = float_as_uint32(vn2) << 19;
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
vn2 -= vmagic_bias;
const uint32_t ven3 = float_as_uint32(vn3) << 19;
const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
vn3 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
float vp2 = vc3 * vt2 + vc2;
float vp3 = vc3 * vt3 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,494 | 32.30303 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-lut16-p3-x5.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 5 * sizeof(float); batch -= 5 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
float vx4 = input[4];
input += 5;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vn4 = vz4 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
const uint32_t ven2 = float_as_uint32(vn2) << 19;
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
vn2 -= vmagic_bias;
const uint32_t ven3 = float_as_uint32(vn3) << 19;
const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
vn3 -= vmagic_bias;
const uint32_t ven4 = float_as_uint32(vn4) << 19;
const uint32_t vidx4 = float_as_uint32(vn4) & vindex_mask;
vn4 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
float vt4 = vn4 * vminus_ln2_hi + vz4;
float vs4 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
vt4 = vn4 * vminus_ln2_lo + vt4;
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
float vp2 = vc3 * vt2 + vc2;
float vp3 = vc3 * vt3 + vc2;
float vp4 = vc3 * vt4 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vp4 *= vt4;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vt4 *= vs4;
vs4 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
vp4 = vp4 * vt4 + vt4;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
const float ve4 = (vp4 + vs4) * valpha;
float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output[4] = vy4;
output += 5;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 6,248 | 33.147541 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-lut16-p3-x6.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x6(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_lut16_p3.prescale;
const float valpha = params->scalar_rr2_lut16_p3.alpha;
const float vbeta = params->scalar_rr2_lut16_p3.beta;
const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
const uint32_t vindex_mask = UINT32_C(0xF);
const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
const float vc3 = params->scalar_rr2_lut16_p3.c3;
const float vc2 = params->scalar_rr2_lut16_p3.c2;
const float vone = params->scalar_rr2_lut16_p3.one;
for (; batch >= 6 * sizeof(float); batch -= 6 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
float vx4 = input[4];
float vx5 = input[5];
input += 6;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
const float vz5 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx5 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vn4 = vz4 * vlog2e + vmagic_bias;
float vn5 = vz5 * vlog2e + vmagic_bias;
const uint32_t ven0 = float_as_uint32(vn0) << 19;
const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
vn0 -= vmagic_bias;
const uint32_t ven1 = float_as_uint32(vn1) << 19;
const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
vn1 -= vmagic_bias;
const uint32_t ven2 = float_as_uint32(vn2) << 19;
const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
vn2 -= vmagic_bias;
const uint32_t ven3 = float_as_uint32(vn3) << 19;
const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
vn3 -= vmagic_bias;
const uint32_t ven4 = float_as_uint32(vn4) << 19;
const uint32_t vidx4 = float_as_uint32(vn4) & vindex_mask;
vn4 -= vmagic_bias;
const uint32_t ven5 = float_as_uint32(vn5) << 19;
const uint32_t vidx5 = float_as_uint32(vn5) & vindex_mask;
vn5 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
float vt4 = vn4 * vminus_ln2_hi + vz4;
float vs4 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx4] + ven4);
float vt5 = vn5 * vminus_ln2_hi + vz5;
float vs5 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx5] + ven5);
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
vt4 = vn4 * vminus_ln2_lo + vt4;
vt5 = vn5 * vminus_ln2_lo + vt5;
float vp0 = vc3 * vt0 + vc2;
float vp1 = vc3 * vt1 + vc2;
float vp2 = vc3 * vt2 + vc2;
float vp3 = vc3 * vt3 + vc2;
float vp4 = vc3 * vt4 + vc2;
float vp5 = vc3 * vt5 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vp4 *= vt4;
vp5 *= vt5;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vt4 *= vs4;
vs4 -= vone;
vt5 *= vs5;
vs5 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
vp4 = vp4 * vt4 + vt4;
vp5 = vp5 * vt5 + vt5;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
const float ve4 = (vp4 + vs4) * valpha;
float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
const float ve5 = (vp5 + vs5) * valpha;
float vy5 = __builtin_wasm_max_f32(vx5 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
vy5 += __builtin_wasm_min_f32(ve5, 0.0f);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output[4] = vy4;
output[5] = vy5;
output += 6;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
const uint32_t ven = float_as_uint32(vn) << 19;
const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
vt = vn * vminus_ln2_lo + vt;
float vp = vc3 * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 7,002 | 33.840796 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-p6-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__wasm_rr2_p6_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
do {
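    // ELU: y = beta * x for x >= 0 and y = alpha * (exp(prescale * x) - 1) for x < 0.
    // The negative branch evaluates exp(z) via the range reduction z = n * ln2 + t: n comes from
    // the magic-bias trick, 2**n is rebuilt by shifting n into the float exponent, and exp(t) is
    // approximated by a degree-6 polynomial (the "p6" in the kernel name).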
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
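    // After adding the magic bias, the low mantissa bits of vn hold round(z / ln2); shifting them
    // into the exponent field yields vs = 2**n, and subtracting the bias recovers n as a float.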
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
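    // Reduced argument t = z - n * ln2, with ln2 split into hi/lo halves (Cody-Waite) to limit
    // the rounding error of the reduction.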
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
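    // At this point vp ~= 2**n * (exp(t) - 1) and vs = 2**n - 1, so vp + vs ~= exp(z) - 1.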
const float ve = (vp + vs) * valpha;
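    // Branchless selection: max(beta * x, 0) keeps the positive branch and min(ve, 0) keeps the
    // negative one (assuming the usual positive prescale, alpha and beta).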
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
| 2,170 | 27.565789 | 103 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-p6-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__wasm_rr2_p6_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output = vy;
}
}
| 3,664 | 26.765152 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-p6-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__wasm_rr2_p6_x3(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
input += 3;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
vn2 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vt2 = vn2 * vminus_ln2_hi + vz2;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 4,473 | 27.864516 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-p6-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__wasm_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
vn2 -= vmagic_bias;
float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
vn3 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vt3 = vn3 * vminus_ln2_hi + vz3;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
float vp3 = vc6 * vt3 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp3 = vp3 * vt3 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp3 = vp3 * vt3 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,174 | 28.741379 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-p6-x5.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__wasm_rr2_p6_x5(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 5 * sizeof(float); batch -= 5 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
float vx4 = input[4];
input += 5;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vn4 = vz4 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
vn2 -= vmagic_bias;
float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
vn3 -= vmagic_bias;
float vs4 = uint32_as_float(float_as_uint32(vn4) << 23);
vn4 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vt4 = vn4 * vminus_ln2_hi + vz4;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
vt4 = vn4 * vminus_ln2_lo + vt4;
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
float vp3 = vc6 * vt3 + vc5;
float vp4 = vc6 * vt4 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp3 = vp3 * vt3 + vc4;
vp4 = vp4 * vt4 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp3 = vp3 * vt3 + vc3;
vp4 = vp4 * vt4 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp4 = vp4 * vt4 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vp4 *= vt4;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vt4 *= vs4;
vs4 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
vp4 = vp4 * vt4 + vt4;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
const float ve4 = (vp4 + vs4) * valpha;
float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output[4] = vy4;
output += 5;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 5,875 | 29.445596 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasm-rr2-p6-x6.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__wasm_rr2_p6_x6(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vprescale = params->scalar_rr2_p6.prescale;
const float valpha = params->scalar_rr2_p6.alpha;
const float vbeta = params->scalar_rr2_p6.beta;
const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
const float vlog2e = params->scalar_rr2_p6.log2e;
const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
const float vc6 = params->scalar_rr2_p6.c6;
const float vc5 = params->scalar_rr2_p6.c5;
const float vc4 = params->scalar_rr2_p6.c4;
const float vc3 = params->scalar_rr2_p6.c3;
const float vc2 = params->scalar_rr2_p6.c2;
const float vone = params->scalar_rr2_p6.one;
for (; batch >= 6 * sizeof(float); batch -= 6 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
float vx4 = input[4];
float vx5 = input[5];
input += 6;
const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
const float vz5 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx5 * vprescale, vsat_cutoff), 0.0f);
float vn0 = vz0 * vlog2e + vmagic_bias;
float vn1 = vz1 * vlog2e + vmagic_bias;
float vn2 = vz2 * vlog2e + vmagic_bias;
float vn3 = vz3 * vlog2e + vmagic_bias;
float vn4 = vz4 * vlog2e + vmagic_bias;
float vn5 = vz5 * vlog2e + vmagic_bias;
float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
vn0 -= vmagic_bias;
float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
vn1 -= vmagic_bias;
float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
vn2 -= vmagic_bias;
float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
vn3 -= vmagic_bias;
float vs4 = uint32_as_float(float_as_uint32(vn4) << 23);
vn4 -= vmagic_bias;
float vs5 = uint32_as_float(float_as_uint32(vn5) << 23);
vn5 -= vmagic_bias;
float vt0 = vn0 * vminus_ln2_hi + vz0;
float vt1 = vn1 * vminus_ln2_hi + vz1;
float vt2 = vn2 * vminus_ln2_hi + vz2;
float vt3 = vn3 * vminus_ln2_hi + vz3;
float vt4 = vn4 * vminus_ln2_hi + vz4;
float vt5 = vn5 * vminus_ln2_hi + vz5;
vt0 = vn0 * vminus_ln2_lo + vt0;
vt1 = vn1 * vminus_ln2_lo + vt1;
vt2 = vn2 * vminus_ln2_lo + vt2;
vt3 = vn3 * vminus_ln2_lo + vt3;
vt4 = vn4 * vminus_ln2_lo + vt4;
vt5 = vn5 * vminus_ln2_lo + vt5;
float vp0 = vc6 * vt0 + vc5;
float vp1 = vc6 * vt1 + vc5;
float vp2 = vc6 * vt2 + vc5;
float vp3 = vc6 * vt3 + vc5;
float vp4 = vc6 * vt4 + vc5;
float vp5 = vc6 * vt5 + vc5;
vp0 = vp0 * vt0 + vc4;
vp1 = vp1 * vt1 + vc4;
vp2 = vp2 * vt2 + vc4;
vp3 = vp3 * vt3 + vc4;
vp4 = vp4 * vt4 + vc4;
vp5 = vp5 * vt5 + vc4;
vp0 = vp0 * vt0 + vc3;
vp1 = vp1 * vt1 + vc3;
vp2 = vp2 * vt2 + vc3;
vp3 = vp3 * vt3 + vc3;
vp4 = vp4 * vt4 + vc3;
vp5 = vp5 * vt5 + vc3;
vp0 = vp0 * vt0 + vc2;
vp1 = vp1 * vt1 + vc2;
vp2 = vp2 * vt2 + vc2;
vp3 = vp3 * vt3 + vc2;
vp4 = vp4 * vt4 + vc2;
vp5 = vp5 * vt5 + vc2;
vp0 *= vt0;
vp1 *= vt1;
vp2 *= vt2;
vp3 *= vt3;
vp4 *= vt4;
vp5 *= vt5;
vt0 *= vs0;
vs0 -= vone;
vt1 *= vs1;
vs1 -= vone;
vt2 *= vs2;
vs2 -= vone;
vt3 *= vs3;
vs3 -= vone;
vt4 *= vs4;
vs4 -= vone;
vt5 *= vs5;
vs5 -= vone;
vp0 = vp0 * vt0 + vt0;
vp1 = vp1 * vt1 + vt1;
vp2 = vp2 * vt2 + vt2;
vp3 = vp3 * vt3 + vt3;
vp4 = vp4 * vt4 + vt4;
vp5 = vp5 * vt5 + vt5;
const float ve0 = (vp0 + vs0) * valpha;
float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
const float ve1 = (vp1 + vs1) * valpha;
float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
const float ve2 = (vp2 + vs2) * valpha;
float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
const float ve3 = (vp3 + vs3) * valpha;
float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
const float ve4 = (vp4 + vs4) * valpha;
float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
const float ve5 = (vp5 + vs5) * valpha;
float vy5 = __builtin_wasm_max_f32(vx5 * vbeta, 0.0f);
vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
vy5 += __builtin_wasm_min_f32(ve5, 0.0f);
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output[4] = vy4;
output[5] = vy5;
output += 6;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
float vn = vz * vlog2e + vmagic_bias;
float vs = uint32_as_float(float_as_uint32(vn) << 23);
vn -= vmagic_bias;
float vt = vn * vminus_ln2_hi + vz;
vt = vn * vminus_ln2_lo + vt;
float vp = vc6 * vt + vc5;
vp = vp * vt + vc4;
vp = vp * vt + vc3;
vp = vp * vt + vc2;
vp *= vt;
vt *= vs;
vs -= vone;
vp = vp * vt + vt;
const float ve = (vp + vs) * valpha;
float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
vy += __builtin_wasm_min_f32(ve, 0.0f);
*output++ = vy;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 6,576 | 30.023585 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-lut16-p3-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_lut16_p3_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
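    // vidx holds each lane's 4-bit table index pre-shifted by 2, i.e. a byte offset into the float
    // table, so the lanes can be gathered one at a time with load32_zero / load32_lane.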
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
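    // The arithmetic shift by 31 expands each lane's sign bit into an all-ones/all-zeros mask;
    // relaxed laneselect then picks the ELU value for negative lanes and beta * x for the rest.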
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 11,792 | 48.759494 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-lut16-p3-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_lut16_p3_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 13,778 | 51.193182 | 124 |
c
|