| repo (string, 1-152 chars) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
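// The "c" (constant) variant: input_b points to a single scalar that is
// subtracted from every element of input_a.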
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc2 = math_max_f32(vacc2, voutput_min);
vacc3 = math_max_f32(vacc3, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
vacc2 = math_min_f32(vacc2, voutput_max);
vacc3 = math_min_f32(vacc3, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
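// Remainder loop: handle the last 1-3 floats one at a time.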
do {
const float va = *input_a++;
float vacc = va - vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,968 | 25.608108 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-scalar-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
float vacc4 = va4 - vb;
float vacc5 = va5 - vb;
float vacc6 = va6 - vb;
float vacc7 = va7 - vb;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc2 = math_max_f32(vacc2, voutput_min);
vacc3 = math_max_f32(vacc3, voutput_min);
vacc4 = math_max_f32(vacc4, voutput_min);
vacc5 = math_max_f32(vacc5, voutput_min);
vacc6 = math_max_f32(vacc6, voutput_min);
vacc7 = math_max_f32(vacc7, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
vacc2 = math_min_f32(vacc2, voutput_max);
vacc3 = math_min_f32(vacc3, voutput_max);
vacc4 = math_min_f32(vacc4, voutput_max);
vacc5 = math_min_f32(vacc5, voutput_max);
vacc6 = math_min_f32(vacc6, voutput_max);
vacc7 = math_min_f32(vacc7, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,676 | 27.478723 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-sse-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__sse_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
const __m128 vb = _mm_load1_ps(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 va = _mm_loadu_ps(input_a);
input_a += 4;
__m128 vacc = _mm_sub_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 va = _mm_loadu_ps(input_a);
__m128 vacc = _mm_sub_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
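// Store the 1-3 remaining floats: write the low two lanes first, move the
// high lanes down, then store a single lane if one element is left.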
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,691 | 25.857143 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-sse-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__sse_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
const __m128 vb = _mm_load1_ps(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(input_a);
const __m128 va1 = _mm_loadu_ps(input_a + 4);
input_a += 8;
__m128 vacc0 = _mm_sub_ps(va0, vb);
__m128 vacc1 = _mm_sub_ps(va1, vb);
vacc0 = _mm_max_ps(vacc0, voutput_min);
vacc1 = _mm_max_ps(vacc1, voutput_min);
vacc0 = _mm_min_ps(vacc0, voutput_max);
vacc1 = _mm_min_ps(vacc1, voutput_max);
_mm_storeu_ps(output, vacc0);
_mm_storeu_ps(output + 4, vacc1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 va = _mm_loadu_ps(input_a);
input_a += 4;
__m128 vacc = _mm_sub_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 va = _mm_loadu_ps(input_a);
__m128 vacc = _mm_sub_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 2,226 | 26.158537 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasm-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasm_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va - vb;
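// __builtin_wasm_max_f32/__builtin_wasm_min_f32 lower directly to the
// f32.max and f32.min WebAssembly instructions.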
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
}
}
| 1,119 | 25.666667 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasm-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasm_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va - vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output = vacc;
}
}
| 1,629 | 25.290323 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasm-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,066 | 26.932432 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasm-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
float vacc4 = va4 - vb;
float vacc5 = va5 - vb;
float vacc6 = va6 - vb;
float vacc7 = va7 - vb;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
vacc4 = __builtin_wasm_max_f32(vacc4, voutput_min);
vacc5 = __builtin_wasm_max_f32(vacc5, voutput_min);
vacc6 = __builtin_wasm_max_f32(vacc6, voutput_min);
vacc7 = __builtin_wasm_max_f32(vacc7, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
vacc4 = __builtin_wasm_min_f32(vacc4, voutput_max);
vacc5 = __builtin_wasm_min_f32(vacc5, voutput_max);
vacc6 = __builtin_wasm_min_f32(vacc6, voutput_max);
vacc7 = __builtin_wasm_min_f32(vacc7, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,854 | 29.37234 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasmsimd-arm-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasmsimd_arm_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
v128_t vy0 = wasm_f32x4_sub(va0, vb);
v128_t vy1 = wasm_f32x4_sub(va1, vb);
v128_t vy2 = wasm_f32x4_sub(va2, vb);
v128_t vy3 = wasm_f32x4_sub(va3, vb);
vy0 = wasm_f32x4_max(vy0, voutput_min);
vy1 = wasm_f32x4_max(vy1, voutput_min);
vy2 = wasm_f32x4_max(vy2, voutput_min);
vy3 = wasm_f32x4_max(vy3, voutput_min);
vy0 = wasm_f32x4_min(vy0, voutput_max);
vy1 = wasm_f32x4_min(vy1, voutput_max);
vy2 = wasm_f32x4_min(vy2, voutput_max);
vy3 = wasm_f32x4_min(vy3, voutput_max);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
wasm_v128_store(output + 8, vy2);
wasm_v128_store(output + 12, vy3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
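// Store the 1-3 remaining floats: write the low 64-bit half, shuffle the
// upper half down, then store one more 32-bit lane if needed.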
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,727 | 28.021277 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasmsimd-arm-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasmsimd_arm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,737 | 25.738462 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasmsimd-arm-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasmsimd_arm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
v128_t vy0 = wasm_f32x4_sub(va0, vb);
v128_t vy1 = wasm_f32x4_sub(va1, vb);
vy0 = wasm_f32x4_max(vy0, voutput_min);
vy1 = wasm_f32x4_max(vy1, voutput_min);
vy0 = wasm_f32x4_min(vy0, voutput_max);
vy1 = wasm_f32x4_min(vy1, voutput_max);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,280 | 26.154762 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasmsimd-x86-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasmsimd_x86_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
v128_t vy0 = wasm_f32x4_sub(va0, vb);
v128_t vy1 = wasm_f32x4_sub(va1, vb);
v128_t vy2 = wasm_f32x4_sub(va2, vb);
v128_t vy3 = wasm_f32x4_sub(va3, vb);
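// The x86-tuned variant clamps with pseudo-min/max (pmin/pmax), which map to
// a single MINPS/MAXPS each and avoid the extra NaN/-0.0 handling that the
// IEEE-style f32x4.min/max used by the *-arm-* variants costs on x86.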
vy0 = wasm_f32x4_pmax(voutput_min, vy0);
vy1 = wasm_f32x4_pmax(voutput_min, vy1);
vy2 = wasm_f32x4_pmax(voutput_min, vy2);
vy3 = wasm_f32x4_pmax(voutput_min, vy3);
vy0 = wasm_f32x4_pmin(voutput_max, vy0);
vy1 = wasm_f32x4_pmin(voutput_max, vy1);
vy2 = wasm_f32x4_pmin(voutput_max, vy2);
vy3 = wasm_f32x4_pmin(voutput_max, vy3);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
wasm_v128_store(output + 8, vy2);
wasm_v128_store(output + 12, vy3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,739 | 28.148936 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasmsimd-x86-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasmsimd_x86_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,741 | 25.8 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-minmax-wasmsimd-x86-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_minmax_ukernel__wasmsimd_x86_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
v128_t vy0 = wasm_f32x4_sub(va0, vb);
v128_t vy1 = wasm_f32x4_sub(va1, vb);
vy0 = wasm_f32x4_pmax(voutput_min, vy0);
vy1 = wasm_f32x4_pmax(voutput_min, vy1);
vy0 = wasm_f32x4_pmin(voutput_max, vy0);
vy1 = wasm_f32x4_pmin(voutput_max, vy1);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,288 | 26.25 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va - vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
}
}
| 950 | 23.384615 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va - vb;
vacc = math_max_f32(vacc, 0.0f);
*output = vacc;
}
}
| 1,313 | 22.464286 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
vacc2 = math_max_f32(vacc2, 0.0f);
vacc3 = math_max_f32(vacc3, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,602 | 23.287879 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-scalar-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
float vacc4 = va4 - vb;
float vacc5 = va5 - vb;
float vacc6 = va6 - vb;
float vacc7 = va7 - vb;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
vacc2 = math_max_f32(vacc2, 0.0f);
vacc3 = math_max_f32(vacc3, 0.0f);
vacc4 = math_max_f32(vacc4, 0.0f);
vacc5 = math_max_f32(vacc5, 0.0f);
vacc6 = math_max_f32(vacc6, 0.0f);
vacc7 = math_max_f32(vacc7, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,098 | 24.597561 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-wasm-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__wasm_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va - vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
}
}
| 958 | 23.589744 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-wasm-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__wasm_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va - vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output = vacc;
}
}
| 1,341 | 22.964286 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-wasm-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__wasm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,650 | 24.015152 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-wasm-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__wasm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
float vacc4 = va4 - vb;
float vacc5 = va5 - vb;
float vacc6 = va6 - vb;
float vacc7 = va7 - vb;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
vacc4 = __builtin_wasm_max_f32(vacc4, 0.0f);
vacc5 = __builtin_wasm_max_f32(vacc5, 0.0f);
vacc6 = __builtin_wasm_max_f32(vacc6, 0.0f);
vacc7 = __builtin_wasm_max_f32(vacc7, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,186 | 25.670732 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__wasmsimd_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vzero = wasm_i32x4_const_splat(0);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
v128_t vy0 = wasm_f32x4_sub(va0, vb);
v128_t vy1 = wasm_f32x4_sub(va1, vb);
v128_t vy2 = wasm_f32x4_sub(va2, vb);
v128_t vy3 = wasm_f32x4_sub(va3, vb);
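// ReLU via integer max: negative floats have the sign bit set, so they compare
// below zero as signed 32-bit integers, and i32x4.max with 0 flushes them to
// +0.0 while leaving non-negative values unchanged.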
vy0 = wasm_i32x4_max(vy0, vzero);
vy1 = wasm_i32x4_max(vy1, vzero);
vy2 = wasm_i32x4_max(vy2, vzero);
vy3 = wasm_i32x4_max(vy3, vzero);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
wasm_v128_store(output + 8, vy2);
wasm_v128_store(output + 12, vy3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_i32x4_max(vy, vzero);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_i32x4_max(vy, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,322 | 26.011628 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-wasmsimd-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__wasmsimd_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vzero = wasm_i32x4_const_splat(0);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_i32x4_max(vy, vzero);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_i32x4_max(vy, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,533 | 23.741935 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-relu-wasmsimd-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_relu_ukernel__wasmsimd_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vzero = wasm_i32x4_const_splat(0);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
v128_t vy0 = wasm_f32x4_sub(va0, vb);
v128_t vy1 = wasm_f32x4_sub(va1, vb);
vy0 = wasm_i32x4_max(vy0, vzero);
vy1 = wasm_i32x4_max(vy1, vzero);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_i32x4_max(vy, vzero);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
vy = wasm_i32x4_max(vy, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,975 | 24.333333 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va - vb;
*output++ = vacc;
}
}
| 911 | 23 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va - vb;
*output = vacc;
}
}
| 1,196 | 21.584906 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,405 | 22.04918 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-scalar-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 - vb;
float vacc1 = va1 - vb;
float vacc2 = va2 - vb;
float vacc3 = va3 - vb;
float vacc4 = va4 - vb;
float vacc5 = va5 - vb;
float vacc6 = va6 - vb;
float vacc7 = va7 - vb;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va - vb;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,745 | 22.917808 | 76 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_ukernel__wasmsimd_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
v128_t vy0 = wasm_f32x4_sub(va0, vb);
v128_t vy1 = wasm_f32x4_sub(va1, vb);
v128_t vy2 = wasm_f32x4_sub(va2, vb);
v128_t vy3 = wasm_f32x4_sub(va3, vb);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
wasm_v128_store(output + 8, vy2);
wasm_v128_store(output + 12, vy3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,046 | 24.911392 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-wasmsimd-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_ukernel__wasmsimd_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,409 | 22.898305 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vsubc-wasmsimd-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vsubc_ukernel__wasmsimd_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
v128_t vy0 = wasm_f32x4_sub(va0, vb);
v128_t vy1 = wasm_f32x4_sub(va1, vb);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_sub(va, vb);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_sub(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,775 | 23.328767 | 90 | c |
| XNNPACK | XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-avx-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__avx_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vacc01234567 = _mm256_loadu_ps(input);
__m256 vacc89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
vacc01234567 = _mm256_max_ps(vmin, vacc01234567);
vacc89ABCDEF = _mm256_max_ps(vmin, vacc89ABCDEF);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
vacc89ABCDEF = _mm256_min_ps(vmax, vacc89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vacc = _mm256_loadu_ps(input);
input += 8;
vacc = _mm256_max_ps(vmin, vacc);
vacc = _mm256_min_ps(vmax, vacc);
_mm256_storeu_ps(output, vacc);
output += 8;
}
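  // Remainder of 1-7 floats: load only the valid lanes via a mask taken from
  // params->avx.mask_table, clamp, then store in 4-, 2- and 1-float pieces.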
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vacc = _mm256_maskload_ps(input, vmask);
vacc = _mm256_max_ps(vmin, vacc);
vacc = _mm256_min_ps(vmax, vacc);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,362 | 27.817073 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__avx_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vmin = _mm256_load_ps(params->avx.min);
const __m256 vmax = _mm256_load_ps(params->avx.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vacc01234567 = _mm256_loadu_ps(input);
input += 8;
vacc01234567 = _mm256_max_ps(vmin, vacc01234567);
vacc01234567 = _mm256_min_ps(vmax, vacc01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vacc = _mm256_maskload_ps(input, vmask);
vacc = _mm256_max_ps(vmin, vacc);
vacc = _mm256_min_ps(vmax, vacc);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 1,887 | 26.764706 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__avx512f_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc0123456789ABCDEF = _mm512_loadu_ps(input);
input += 16;
vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEF);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input);
vacc = _mm512_max_ps(vmin, vacc);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 1,699 | 28.824561 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__avx512f_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vmin = _mm512_set1_ps(params->scalar.min);
const __m512 vmax = _mm512_set1_ps(params->scalar.max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vacc0123456789ABCDEF = _mm512_loadu_ps(input);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_loadu_ps(input + 16);
input += 32;
vacc0123456789ABCDEF = _mm512_max_ps(vmin, vacc0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vmin, vaccGHIJKLMNOPQRSTUV);
vacc0123456789ABCDEF = _mm512_min_ps(vmax, vacc0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vmax, vaccGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc = _mm512_loadu_ps(input);
input += 16;
vacc = _mm512_max_ps(vmin, vacc);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input);
vacc = _mm512_max_ps(vmin, vacc);
vacc = _mm512_min_ps(vmax, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 2,223 | 30.323944 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__neon_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vy_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vy_max = vld1q_dup_f32(&params->scalar.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vacc0123 = vld1q_f32(input); input += 4;
vacc0123 = vmaxq_f32(vacc0123, vy_min);
vacc0123 = vminq_f32(vacc0123, vy_max);
vst1q_f32(output, vacc0123); output += 4;
}
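  // Tail of 1-3 floats: load a full 4-lane vector (XNN_OOB_READS), clamp it,
  // and store only the low 2-lane and/or single-lane parts.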
if XNN_UNLIKELY(batch != 0) {
float32x4_t vacc = vld1q_f32(input);
vacc = vmaxq_f32(vacc, vy_min);
vacc = vminq_f32(vacc, vy_max);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 1,486 | 25.553571 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__neon_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vy_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vy_max = vld1q_dup_f32(&params->scalar.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vacc0123 = vld1q_f32(input); input += 4;
float32x4_t vacc4567 = vld1q_f32(input); input += 4;
vacc0123 = vmaxq_f32(vacc0123, vy_min);
vacc4567 = vmaxq_f32(vacc4567, vy_min);
vacc0123 = vminq_f32(vacc0123, vy_max);
vacc4567 = vminq_f32(vacc4567, vy_max);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vacc = vld1q_f32(input); input += 4;
vacc = vmaxq_f32(vacc, vy_min);
vacc = vminq_f32(vacc, vy_max);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vacc = vld1q_f32(input);
vacc = vmaxq_f32(vacc, vy_min);
vacc = vminq_f32(vacc, vy_max);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 1,915 | 28.030303 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-rvv-x1v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__rvv_x1v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
batch >>= XNN_LOG2_SIZEOF_FLOAT;
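  // Strip-mined, vector-length-agnostic loop: vsetvl returns how many 32-bit
  // elements fit in one LMUL=1 register group, so no scalar tail is needed.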
do {
const size_t n = __riscv_vsetvl_e32m1(batch);
vfloat32m1_t vacc = __riscv_vle32_v_f32m1(input, n);
input += n;
vacc = __riscv_vfmax_vf_f32m1(vacc, vmin, n);
vacc = __riscv_vfmin_vf_f32m1(vacc, vmax, n);
__riscv_vse32_v_f32m1(output, vacc, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 1,174 | 24.543478 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-rvv-x2v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__rvv_x2v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m2(batch);
vfloat32m2_t vacc = __riscv_vle32_v_f32m2(input, n);
input += n;
vacc = __riscv_vfmax_vf_f32m2(vacc, vmin, n);
vacc = __riscv_vfmin_vf_f32m2(vacc, vmax, n);
__riscv_vse32_v_f32m2(output, vacc, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 1,174 | 24.543478 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-rvv-x4v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__rvv_x4v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m4(batch);
vfloat32m4_t vacc = __riscv_vle32_v_f32m4(input, n);
input += n;
vacc = __riscv_vfmax_vf_f32m4(vacc, vmin, n);
vacc = __riscv_vfmin_vf_f32m4(vacc, vmax, n);
__riscv_vse32_v_f32m4(output, vacc, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 1,174 | 24.543478 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-rvv-x8v.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__rvv_x8v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m8(batch);
vfloat32m8_t vacc = __riscv_vle32_v_f32m8(input, n);
input += n;
vacc = __riscv_vfmax_vf_f32m8(vacc, vmin, n);
vacc = __riscv_vfmin_vf_f32m8(vacc, vmax, n);
__riscv_vse32_v_f32m8(output, vacc, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 1,174 | 24.543478 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__scalar_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vy_min = params->scalar.min;
const float vy_max = params->scalar.max;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
float vacc = *input++;
vacc = math_max_f32(vacc, vy_min);
vacc = math_min_f32(vacc, vy_max);
*output++ = vacc;
}
}
| 950 | 24.026316 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__scalar_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vy_min = params->scalar.min;
const float vy_max = params->scalar.max;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vacc0 = input[0];
float vacc1 = input[1];
input += 2;
vacc0 = math_max_f32(vacc0, vy_min);
vacc1 = math_max_f32(vacc1, vy_min);
vacc0 = math_min_f32(vacc0, vy_max);
vacc1 = math_min_f32(vacc1, vy_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vacc = *input;
vacc = math_max_f32(vacc, vy_min);
vacc = math_min_f32(vacc, vy_max);
*output = vacc;
}
}
| 1,292 | 23.396226 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__scalar_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vy_min = params->scalar.min;
const float vy_max = params->scalar.max;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vacc0 = input[0];
float vacc1 = input[1];
float vacc2 = input[2];
float vacc3 = input[3];
input += 4;
vacc0 = math_max_f32(vacc0, vy_min);
vacc1 = math_max_f32(vacc1, vy_min);
vacc2 = math_max_f32(vacc2, vy_min);
vacc3 = math_max_f32(vacc3, vy_min);
vacc0 = math_min_f32(vacc0, vy_max);
vacc1 = math_min_f32(vacc1, vy_max);
vacc2 = math_min_f32(vacc2, vy_max);
vacc3 = math_min_f32(vacc3, vy_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vacc = *input++;
vacc = math_max_f32(vacc, vy_min);
vacc = math_min_f32(vacc, vy_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,635 | 24.5625 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__sse_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vy_min = _mm_load_ps(params->sse.min);
const __m128 vy_max = _mm_load_ps(params->sse.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vacc0123 = _mm_loadu_ps(input);
input += 4;
vacc0123 = _mm_max_ps(vacc0123, vy_min);
vacc0123 = _mm_min_ps(vacc0123, vy_max);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vacc = _mm_loadu_ps(input);
vacc = _mm_max_ps(vacc, vy_min);
vacc = _mm_min_ps(vacc, vy_max);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,442 | 23.87931 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__sse_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vy_min = _mm_load_ps(params->sse.min);
const __m128 vy_max = _mm_load_ps(params->sse.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vacc0123 = _mm_loadu_ps(input);
__m128 vacc4567 = _mm_loadu_ps(input + 4);
input += 8;
vacc0123 = _mm_max_ps(vacc0123, vy_min);
vacc4567 = _mm_max_ps(vacc4567, vy_min);
vacc0123 = _mm_min_ps(vacc0123, vy_max);
vacc4567 = _mm_min_ps(vacc4567, vy_max);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vacc = _mm_loadu_ps(input);
input += 4;
vacc = _mm_max_ps(vacc, vy_min);
vacc = _mm_min_ps(vacc, vy_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
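  // Remainder of 1-3 floats: clamp a full vector and store only the low lanes.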
if XNN_UNLIKELY(batch != 0) {
__m128 vacc = _mm_loadu_ps(input);
vacc = _mm_max_ps(vacc, vy_min);
vacc = _mm_min_ps(vacc, vy_max);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,872 | 25.013889 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__wasm_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vy_min = params->scalar.min;
const float vy_max = params->scalar.max;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
float vacc = *input++;
vacc = __builtin_wasm_max_f32(vacc, vy_min);
vacc = __builtin_wasm_min_f32(vacc, vy_max);
*output++ = vacc;
}
}
| 968 | 24.5 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__wasm_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vy_min = params->scalar.min;
const float vy_max = params->scalar.max;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vacc0 = input[0];
float vacc1 = input[1];
input += 2;
vacc0 = __builtin_wasm_max_f32(vacc0, vy_min);
vacc1 = __builtin_wasm_max_f32(vacc1, vy_min);
vacc0 = __builtin_wasm_min_f32(vacc0, vy_max);
vacc1 = __builtin_wasm_min_f32(vacc1, vy_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vacc = *input;
vacc = __builtin_wasm_max_f32(vacc, vy_min);
vacc = __builtin_wasm_min_f32(vacc, vy_max);
*output = vacc;
}
}
| 1,350 | 24.490566 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__wasm_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vy_min = params->scalar.min;
const float vy_max = params->scalar.max;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vacc0 = input[0];
float vacc1 = input[1];
float vacc2 = input[2];
float vacc3 = input[3];
input += 4;
vacc0 = __builtin_wasm_max_f32(vacc0, vy_min);
vacc1 = __builtin_wasm_max_f32(vacc1, vy_min);
vacc2 = __builtin_wasm_max_f32(vacc2, vy_min);
vacc3 = __builtin_wasm_max_f32(vacc3, vy_min);
vacc0 = __builtin_wasm_min_f32(vacc0, vy_max);
vacc1 = __builtin_wasm_min_f32(vacc1, vy_max);
vacc2 = __builtin_wasm_min_f32(vacc2, vy_max);
vacc3 = __builtin_wasm_min_f32(vacc3, vy_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vacc = *input++;
vacc = __builtin_wasm_max_f32(vacc, vy_min);
vacc = __builtin_wasm_min_f32(vacc, vy_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,733 | 26.09375 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__wasmsimd_arm_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vy_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vy_max = wasm_v128_load64_splat(params->wasmsimd.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vacc = wasm_v128_load(input);
input += 4;
vacc = wasm_f32x4_max(vacc, vy_min);
vacc = wasm_f32x4_min(vacc, vy_max);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vacc = wasm_v128_load(input);
vacc = wasm_f32x4_max(vacc, vy_min);
vacc = wasm_f32x4_min(vacc, vy_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,516 | 25.155172 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-wasmsimd-arm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__wasmsimd_arm_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vy_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vy_max = wasm_v128_load64_splat(params->wasmsimd.max);
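  // The _arm variant clamps with wasm_f32x4_min/max, whose IEEE NaN-propagating
  // semantics match the native NEON FMIN/FMAX instructions.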
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vacc0123 = wasm_v128_load(input);
v128_t vacc4567 = wasm_v128_load(input + 4);
input += 8;
vacc0123 = wasm_f32x4_max(vacc0123, vy_min);
vacc4567 = wasm_f32x4_max(vacc4567, vy_min);
vacc0123 = wasm_f32x4_min(vacc0123, vy_max);
vacc4567 = wasm_f32x4_min(vacc4567, vy_max);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vacc = wasm_v128_load(input);
input += 4;
vacc = wasm_f32x4_max(vacc, vy_min);
vacc = wasm_f32x4_min(vacc, vy_max);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vacc = wasm_v128_load(input);
vacc = wasm_f32x4_max(vacc, vy_min);
vacc = wasm_f32x4_min(vacc, vy_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,995 | 26.342466 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__wasmsimd_x86_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vy_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vy_max = wasm_v128_load64_splat(params->wasmsimd.max);
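  // The _x86 variant clamps with pmin/pmax (relaxed NaN handling), which map
  // directly onto x86 MINPS/MAXPS; the clamp bound is passed as the first operand.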
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vacc = wasm_v128_load(input);
input += 4;
vacc = wasm_f32x4_pmax(vy_min, vacc);
vacc = wasm_f32x4_pmin(vy_max, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vacc = wasm_v128_load(input);
vacc = wasm_f32x4_pmax(vy_min, vacc);
vacc = wasm_f32x4_pmin(vy_max, vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,520 | 25.224138 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vclamp/gen/f32-vclamp-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vclamp/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vclamp_ukernel__wasmsimd_x86_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vy_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vy_max = wasm_v128_load64_splat(params->wasmsimd.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vacc0123 = wasm_v128_load(input);
v128_t vacc4567 = wasm_v128_load(input + 4);
input += 8;
vacc0123 = wasm_f32x4_pmax(vy_min, vacc0123);
vacc4567 = wasm_f32x4_pmax(vy_min, vacc4567);
vacc0123 = wasm_f32x4_pmin(vy_max, vacc0123);
vacc4567 = wasm_f32x4_pmin(vy_max, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vacc = wasm_v128_load(input);
input += 4;
vacc = wasm_f32x4_pmax(vy_min, vacc);
vacc = wasm_f32x4_pmin(vy_max, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vacc = wasm_v128_load(input);
vacc = wasm_f32x4_pmax(vy_min, vacc);
vacc = wasm_f32x4_pmin(vy_max, vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,003 | 26.452055 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-neon-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__neon_x12(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
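  // Planar complex layout: real parts occupy the first half of each buffer and
  // imaginary parts the second half. Each iteration handles 12 complex values:
  // (ar + i*ai) * (br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br).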
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const float32x4_t va0r = vld1q_f32(ar); ar += 4;
const float32x4_t va0i = vld1q_f32(ai); ai += 4;
const float32x4_t vb0r = vld1q_f32(br); br += 4;
const float32x4_t vb0i = vld1q_f32(bi); bi += 4;
const float32x4_t va1r = vld1q_f32(ar); ar += 4;
const float32x4_t va1i = vld1q_f32(ai); ai += 4;
const float32x4_t vb1r = vld1q_f32(br); br += 4;
const float32x4_t vb1i = vld1q_f32(bi); bi += 4;
const float32x4_t va2r = vld1q_f32(ar); ar += 4;
const float32x4_t va2i = vld1q_f32(ai); ai += 4;
const float32x4_t vb2r = vld1q_f32(br); br += 4;
const float32x4_t vb2i = vld1q_f32(bi); bi += 4;
float32x4_t vacc0r = vmulq_f32(va0r, vb0r);
float32x4_t vacc0i = vmulq_f32(va0r, vb0i);
float32x4_t vacc1r = vmulq_f32(va1r, vb1r);
float32x4_t vacc1i = vmulq_f32(va1r, vb1i);
float32x4_t vacc2r = vmulq_f32(va2r, vb2r);
float32x4_t vacc2i = vmulq_f32(va2r, vb2i);
vacc0r = vmlsq_f32(vacc0r, va0i, vb0i);
vacc0i = vmlaq_f32(vacc0i, va0i, vb0r);
vacc1r = vmlsq_f32(vacc1r, va1i, vb1i);
vacc1i = vmlaq_f32(vacc1i, va1i, vb1r);
vacc2r = vmlsq_f32(vacc2r, va2i, vb2i);
vacc2i = vmlaq_f32(vacc2i, va2i, vb2r);
vst1q_f32(or, vacc0r); or += 4;
vst1q_f32(oi, vacc0i); oi += 4;
vst1q_f32(or, vacc1r); or += 4;
vst1q_f32(oi, vacc1i); oi += 4;
vst1q_f32(or, vacc2r); or += 4;
vst1q_f32(oi, vacc2i); oi += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t var = vld1q_f32(ar); ar += 4;
const float32x4_t vai = vld1q_f32(ai); ai += 4;
const float32x4_t vbr = vld1q_f32(br); br += 4;
const float32x4_t vbi = vld1q_f32(bi); bi += 4;
float32x4_t vaccr = vmulq_f32(var, vbr);
float32x4_t vacci = vmulq_f32(var, vbi);
vaccr = vmlsq_f32(vaccr, vai, vbi);
vacci = vmlaq_f32(vacci, vai, vbr);
vst1q_f32(or, vaccr); or += 4;
vst1q_f32(oi, vacci); oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t var = vld1q_f32(ar); ar += 4;
const float32x4_t vai = vld1q_f32(ai); ai += 4;
const float32x4_t vbr = vld1q_f32(br); br += 4;
const float32x4_t vbi = vld1q_f32(bi); bi += 4;
float32x4_t vaccr = vmulq_f32(var, vbr);
float32x4_t vacci = vmulq_f32(var, vbi);
vaccr = vmlsq_f32(vaccr, vai, vbi);
vacci = vmlaq_f32(vacci, vai, vbr);
float32x2_t vaccr_lo = vget_low_f32(vaccr);
float32x2_t vacci_lo = vget_low_f32(vacci);
if (batch & (2 * sizeof(float))) {
vst1_f32(or, vaccr_lo); or += 2;
vst1_f32(oi, vacci_lo); oi += 2;
vaccr_lo = vget_high_f32(vaccr);
vacci_lo = vget_high_f32(vacci);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(or, vaccr_lo, 0);
vst1_lane_f32(oi, vacci_lo, 0);
}
}
}
| 3,811 | 32.734513 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__neon_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const float32x4_t va0r = vld1q_f32(ar); ar += 4;
const float32x4_t va0i = vld1q_f32(ai); ai += 4;
const float32x4_t vb0r = vld1q_f32(br); br += 4;
const float32x4_t vb0i = vld1q_f32(bi); bi += 4;
const float32x4_t va1r = vld1q_f32(ar); ar += 4;
const float32x4_t va1i = vld1q_f32(ai); ai += 4;
const float32x4_t vb1r = vld1q_f32(br); br += 4;
const float32x4_t vb1i = vld1q_f32(bi); bi += 4;
const float32x4_t va2r = vld1q_f32(ar); ar += 4;
const float32x4_t va2i = vld1q_f32(ai); ai += 4;
const float32x4_t vb2r = vld1q_f32(br); br += 4;
const float32x4_t vb2i = vld1q_f32(bi); bi += 4;
const float32x4_t va3r = vld1q_f32(ar); ar += 4;
const float32x4_t va3i = vld1q_f32(ai); ai += 4;
const float32x4_t vb3r = vld1q_f32(br); br += 4;
const float32x4_t vb3i = vld1q_f32(bi); bi += 4;
float32x4_t vacc0r = vmulq_f32(va0r, vb0r);
float32x4_t vacc0i = vmulq_f32(va0r, vb0i);
float32x4_t vacc1r = vmulq_f32(va1r, vb1r);
float32x4_t vacc1i = vmulq_f32(va1r, vb1i);
float32x4_t vacc2r = vmulq_f32(va2r, vb2r);
float32x4_t vacc2i = vmulq_f32(va2r, vb2i);
float32x4_t vacc3r = vmulq_f32(va3r, vb3r);
float32x4_t vacc3i = vmulq_f32(va3r, vb3i);
vacc0r = vmlsq_f32(vacc0r, va0i, vb0i);
vacc0i = vmlaq_f32(vacc0i, va0i, vb0r);
vacc1r = vmlsq_f32(vacc1r, va1i, vb1i);
vacc1i = vmlaq_f32(vacc1i, va1i, vb1r);
vacc2r = vmlsq_f32(vacc2r, va2i, vb2i);
vacc2i = vmlaq_f32(vacc2i, va2i, vb2r);
vacc3r = vmlsq_f32(vacc3r, va3i, vb3i);
vacc3i = vmlaq_f32(vacc3i, va3i, vb3r);
vst1q_f32(or, vacc0r); or += 4;
vst1q_f32(oi, vacc0i); oi += 4;
vst1q_f32(or, vacc1r); or += 4;
vst1q_f32(oi, vacc1i); oi += 4;
vst1q_f32(or, vacc2r); or += 4;
vst1q_f32(oi, vacc2i); oi += 4;
vst1q_f32(or, vacc3r); or += 4;
vst1q_f32(oi, vacc3i); oi += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t var = vld1q_f32(ar); ar += 4;
const float32x4_t vai = vld1q_f32(ai); ai += 4;
const float32x4_t vbr = vld1q_f32(br); br += 4;
const float32x4_t vbi = vld1q_f32(bi); bi += 4;
float32x4_t vaccr = vmulq_f32(var, vbr);
float32x4_t vacci = vmulq_f32(var, vbi);
vaccr = vmlsq_f32(vaccr, vai, vbi);
vacci = vmlaq_f32(vacci, vai, vbr);
vst1q_f32(or, vaccr); or += 4;
vst1q_f32(oi, vacci); oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t var = vld1q_f32(ar); ar += 4;
const float32x4_t vai = vld1q_f32(ai); ai += 4;
const float32x4_t vbr = vld1q_f32(br); br += 4;
const float32x4_t vbi = vld1q_f32(bi); bi += 4;
float32x4_t vaccr = vmulq_f32(var, vbr);
float32x4_t vacci = vmulq_f32(var, vbi);
vaccr = vmlsq_f32(vaccr, vai, vbi);
vacci = vmlaq_f32(vacci, vai, vbr);
float32x2_t vaccr_lo = vget_low_f32(vaccr);
float32x2_t vacci_lo = vget_low_f32(vacci);
if (batch & (2 * sizeof(float))) {
vst1_f32(or, vaccr_lo); or += 2;
vst1_f32(oi, vacci_lo); oi += 2;
vaccr_lo = vget_high_f32(vaccr);
vacci_lo = vget_high_f32(vacci);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(or, vaccr_lo, 0);
vst1_lane_f32(oi, vacci_lo, 0);
}
}
}
| 4,279 | 33.796748 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__neon_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t var = vld1q_f32(ar); ar += 4;
const float32x4_t vai = vld1q_f32(ai); ai += 4;
const float32x4_t vbr = vld1q_f32(br); br += 4;
const float32x4_t vbi = vld1q_f32(bi); bi += 4;
float32x4_t vaccr = vmulq_f32(var, vbr);
float32x4_t vacci = vmulq_f32(var, vbi);
vaccr = vmlsq_f32(vaccr, vai, vbi);
vacci = vmlaq_f32(vacci, vai, vbr);
vst1q_f32(or, vaccr); or += 4;
vst1q_f32(oi, vacci); oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t var = vld1q_f32(ar); ar += 4;
const float32x4_t vai = vld1q_f32(ai); ai += 4;
const float32x4_t vbr = vld1q_f32(br); br += 4;
const float32x4_t vbi = vld1q_f32(bi); bi += 4;
float32x4_t vaccr = vmulq_f32(var, vbr);
float32x4_t vacci = vmulq_f32(var, vbi);
vaccr = vmlsq_f32(vaccr, vai, vbi);
vacci = vmlaq_f32(vacci, vai, vbr);
float32x2_t vaccr_lo = vget_low_f32(vaccr);
float32x2_t vacci_lo = vget_low_f32(vacci);
if (batch & (2 * sizeof(float))) {
vst1_f32(or, vaccr_lo); or += 2;
vst1_f32(oi, vacci_lo); oi += 2;
vaccr_lo = vget_high_f32(vaccr);
vacci_lo = vget_high_f32(vacci);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(or, vaccr_lo, 0);
vst1_lane_f32(oi, vacci_lo, 0);
}
}
}
| 2,330 | 28.884615 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__neon_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t va0r = vld1q_f32(ar); ar += 4;
const float32x4_t va0i = vld1q_f32(ai); ai += 4;
const float32x4_t vb0r = vld1q_f32(br); br += 4;
const float32x4_t vb0i = vld1q_f32(bi); bi += 4;
const float32x4_t va1r = vld1q_f32(ar); ar += 4;
const float32x4_t va1i = vld1q_f32(ai); ai += 4;
const float32x4_t vb1r = vld1q_f32(br); br += 4;
const float32x4_t vb1i = vld1q_f32(bi); bi += 4;
float32x4_t vacc0r = vmulq_f32(va0r, vb0r);
float32x4_t vacc0i = vmulq_f32(va0r, vb0i);
float32x4_t vacc1r = vmulq_f32(va1r, vb1r);
float32x4_t vacc1i = vmulq_f32(va1r, vb1i);
vacc0r = vmlsq_f32(vacc0r, va0i, vb0i);
vacc0i = vmlaq_f32(vacc0i, va0i, vb0r);
vacc1r = vmlsq_f32(vacc1r, va1i, vb1i);
vacc1i = vmlaq_f32(vacc1i, va1i, vb1r);
vst1q_f32(or, vacc0r); or += 4;
vst1q_f32(oi, vacc0i); oi += 4;
vst1q_f32(or, vacc1r); or += 4;
vst1q_f32(oi, vacc1i); oi += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t var = vld1q_f32(ar); ar += 4;
const float32x4_t vai = vld1q_f32(ai); ai += 4;
const float32x4_t vbr = vld1q_f32(br); br += 4;
const float32x4_t vbi = vld1q_f32(bi); bi += 4;
float32x4_t vaccr = vmulq_f32(var, vbr);
float32x4_t vacci = vmulq_f32(var, vbi);
vaccr = vmlsq_f32(vaccr, vai, vbi);
vacci = vmlaq_f32(vacci, vai, vbr);
vst1q_f32(or, vaccr); or += 4;
vst1q_f32(oi, vacci); oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t var = vld1q_f32(ar); ar += 4;
const float32x4_t vai = vld1q_f32(ai); ai += 4;
const float32x4_t vbr = vld1q_f32(br); br += 4;
const float32x4_t vbi = vld1q_f32(bi); bi += 4;
float32x4_t vaccr = vmulq_f32(var, vbr);
float32x4_t vacci = vmulq_f32(var, vbi);
vaccr = vmlsq_f32(vaccr, vai, vbi);
vacci = vmlaq_f32(vacci, vai, vbr);
float32x2_t vaccr_lo = vget_low_f32(vaccr);
float32x2_t vacci_lo = vget_low_f32(vacci);
if (batch & (2 * sizeof(float))) {
vst1_f32(or, vaccr_lo); or += 2;
vst1_f32(oi, vacci_lo); oi += 2;
vaccr_lo = vget_high_f32(vaccr);
vacci_lo = vget_high_f32(vacci);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(or, vaccr_lo, 0);
vst1_lane_f32(oi, vacci_lo, 0);
}
}
}
| 3,340 | 31.436893 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
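  // Planar complex multiply, one element per iteration:
  // real = ar*br - ai*bi, imag = ar*bi + ai*br.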
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float var = *ar++;
const float vai = *ai++;
const float vbr = *br++;
const float vbi = *bi++;
const float vaccr = var * vbr - vai * vbi;
const float vacci = var * vbi + vai * vbr;
*or++ = vaccr;
*oi++ = vacci;
}
}
| 1,277 | 26.191489 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0r = ar[0];
const float va1r = ar[1];
ar += 2;
const float va0i = ai[0];
const float va1i = ai[1];
ai += 2;
const float vb0r = br[0];
const float vb1r = br[1];
br += 2;
const float vb0i = bi[0];
const float vb1i = bi[1];
bi += 2;
const float vacc0r = va0r * vb0r - va0i * vb0i;
const float vacc1r = va1r * vb1r - va1i * vb1i;
const float vacc0i = va0r * vb0i + va0i * vb0r;
const float vacc1i = va1r * vb1i + va1i * vb1r;
or[0] = vacc0r;
or[1] = vacc1r;
or += 2;
oi[0] = vacc0i;
oi[1] = vacc1i;
oi += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float var = *ar;
const float vai = *ai;
const float vbr = *br;
const float vbi = *bi;
const float vaccr = var * vbr - vai * vbi;
const float vacci = var * vbi + vai * vbr;
*or = vaccr;
*oi = vacci;
}
}
| 1,958 | 23.797468 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0r = ar[0];
const float va1r = ar[1];
const float va2r = ar[2];
const float va3r = ar[3];
ar += 4;
const float va0i = ai[0];
const float va1i = ai[1];
const float va2i = ai[2];
const float va3i = ai[3];
ai += 4;
const float vb0r = br[0];
const float vb1r = br[1];
const float vb2r = br[2];
const float vb3r = br[3];
br += 4;
const float vb0i = bi[0];
const float vb1i = bi[1];
const float vb2i = bi[2];
const float vb3i = bi[3];
bi += 4;
const float vacc0r = va0r * vb0r - va0i * vb0i;
const float vacc1r = va1r * vb1r - va1i * vb1i;
const float vacc2r = va2r * vb2r - va2i * vb2i;
const float vacc3r = va3r * vb3r - va3i * vb3i;
const float vacc0i = va0r * vb0i + va0i * vb0r;
const float vacc1i = va1r * vb1i + va1i * vb1r;
const float vacc2i = va2r * vb2i + va2i * vb2r;
const float vacc3i = va3r * vb3i + va3i * vb3r;
or[0] = vacc0r;
or[1] = vacc1r;
or[2] = vacc2r;
or[3] = vacc3r;
or += 4;
oi[0] = vacc0i;
oi[1] = vacc1i;
oi[2] = vacc2i;
oi[3] = vacc3i;
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float var = *ar++;
const float vai = *ai++;
const float vbr = *br++;
const float vbi = *bi++;
const float vaccr = var * vbr - vai * vbi;
const float vacci = var * vbi + vai * vbr;
*or++ = vaccr;
*oi++ = vacci;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,543 | 25.226804 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params)
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0r = ar[0];
const float va1r = ar[1];
const float va2r = ar[2];
const float va3r = ar[3];
const float va4r = ar[4];
const float va5r = ar[5];
const float va6r = ar[6];
const float va7r = ar[7];
ar += 8;
const float va0i = ai[0];
const float va1i = ai[1];
const float va2i = ai[2];
const float va3i = ai[3];
const float va4i = ai[4];
const float va5i = ai[5];
const float va6i = ai[6];
const float va7i = ai[7];
ai += 8;
const float vb0r = br[0];
const float vb1r = br[1];
const float vb2r = br[2];
const float vb3r = br[3];
const float vb4r = br[4];
const float vb5r = br[5];
const float vb6r = br[6];
const float vb7r = br[7];
br += 8;
const float vb0i = bi[0];
const float vb1i = bi[1];
const float vb2i = bi[2];
const float vb3i = bi[3];
const float vb4i = bi[4];
const float vb5i = bi[5];
const float vb6i = bi[6];
const float vb7i = bi[7];
bi += 8;
const float vacc0r = va0r * vb0r - va0i * vb0i;
const float vacc1r = va1r * vb1r - va1i * vb1i;
const float vacc2r = va2r * vb2r - va2i * vb2i;
const float vacc3r = va3r * vb3r - va3i * vb3i;
const float vacc4r = va4r * vb4r - va4i * vb4i;
const float vacc5r = va5r * vb5r - va5i * vb5i;
const float vacc6r = va6r * vb6r - va6i * vb6i;
const float vacc7r = va7r * vb7r - va7i * vb7i;
const float vacc0i = va0r * vb0i + va0i * vb0r;
const float vacc1i = va1r * vb1i + va1i * vb1r;
const float vacc2i = va2r * vb2i + va2i * vb2r;
const float vacc3i = va3r * vb3i + va3i * vb3r;
const float vacc4i = va4r * vb4i + va4i * vb4r;
const float vacc5i = va5r * vb5i + va5i * vb5r;
const float vacc6i = va6r * vb6i + va6i * vb6r;
const float vacc7i = va7r * vb7i + va7i * vb7r;
or[0] = vacc0r;
or[1] = vacc1r;
or[2] = vacc2r;
or[3] = vacc3r;
or[4] = vacc4r;
or[5] = vacc5r;
or[6] = vacc6r;
or[7] = vacc7r;
or += 8;
oi[0] = vacc0i;
oi[1] = vacc1i;
oi[2] = vacc2i;
oi[3] = vacc3i;
oi[4] = vacc4i;
oi[5] = vacc5i;
oi[6] = vacc6i;
oi[7] = vacc7i;
oi += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float var = *ar++;
const float vai = *ai++;
const float vbr = *br++;
const float vbi = *bi++;
const float vaccr = var * vbr - vai * vbi;
const float vacci = var * vbi + vai * vbr;
*or++ = vaccr;
*oi++ = vacci;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 3,599 | 26.906977 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-sse-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__sse_x12(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const __m128 va0r = _mm_loadu_ps(ar);
const __m128 va0i = _mm_loadu_ps(ai);
const __m128 vb0r = _mm_loadu_ps(br);
const __m128 vb0i = _mm_loadu_ps(bi);
const __m128 va1r = _mm_loadu_ps(ar + 4);
const __m128 va1i = _mm_loadu_ps(ai + 4);
const __m128 vb1r = _mm_loadu_ps(br + 4);
const __m128 vb1i = _mm_loadu_ps(bi + 4);
const __m128 va2r = _mm_loadu_ps(ar + 8);
const __m128 va2i = _mm_loadu_ps(ai + 8);
const __m128 vb2r = _mm_loadu_ps(br + 8);
const __m128 vb2i = _mm_loadu_ps(bi + 8);
ar += 12;
ai += 12;
br += 12;
bi += 12;
__m128 vacc0r = _mm_mul_ps(va0r, vb0r);
__m128 vacc0i = _mm_mul_ps(va0r, vb0i);
__m128 vacc1r = _mm_mul_ps(va1r, vb1r);
__m128 vacc1i = _mm_mul_ps(va1r, vb1i);
__m128 vacc2r = _mm_mul_ps(va2r, vb2r);
__m128 vacc2i = _mm_mul_ps(va2r, vb2i);
vacc0r = _mm_sub_ps(vacc0r, _mm_mul_ps(va0i, vb0i));
vacc0i = _mm_add_ps(vacc0i, _mm_mul_ps(va0i, vb0r));
vacc1r = _mm_sub_ps(vacc1r, _mm_mul_ps(va1i, vb1i));
vacc1i = _mm_add_ps(vacc1i, _mm_mul_ps(va1i, vb1r));
vacc2r = _mm_sub_ps(vacc2r, _mm_mul_ps(va2i, vb2i));
vacc2i = _mm_add_ps(vacc2i, _mm_mul_ps(va2i, vb2r));
_mm_storeu_ps(or, vacc0r);
_mm_storeu_ps(oi, vacc0i);
_mm_storeu_ps(or + 4, vacc1r);
_mm_storeu_ps(oi + 4, vacc1i);
_mm_storeu_ps(or + 8, vacc2r);
_mm_storeu_ps(oi + 8, vacc2i);
or += 12;
oi += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 var = _mm_loadu_ps(ar);
ar += 4;
const __m128 vai = _mm_loadu_ps(ai);
ai += 4;
const __m128 vbr = _mm_loadu_ps(br);
br += 4;
const __m128 vbi = _mm_loadu_ps(bi);
bi += 4;
__m128 vaccr = _mm_mul_ps(var, vbr);
__m128 vacci = _mm_mul_ps(var, vbi);
vaccr = _mm_sub_ps(vaccr, _mm_mul_ps(vai, vbi));
vacci = _mm_add_ps(vacci, _mm_mul_ps(vai, vbr));
_mm_storeu_ps(or, vaccr);
or += 4;
_mm_storeu_ps(oi, vacci);
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
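    // 1-3 complex elements remain: load full 4-lane vectors anyway (the kernel is
    // declared XNN_OOB_READS, so reading past the valid data is permitted) and
    // store only the valid lanes below.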
const __m128 var = _mm_loadu_ps(ar);
ar += 4;
const __m128 vai = _mm_loadu_ps(ai);
ai += 4;
const __m128 vbr = _mm_loadu_ps(br);
br += 4;
const __m128 vbi = _mm_loadu_ps(bi);
bi += 4;
__m128 vaccr = _mm_mul_ps(var, vbr);
__m128 vacci = _mm_mul_ps(var, vbi);
vaccr = _mm_sub_ps(vaccr, _mm_mul_ps(vai, vbi));
vacci = _mm_add_ps(vacci, _mm_mul_ps(vai, vbr));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) or, vaccr);
or += 2;
_mm_storel_pi((__m64*) oi, vacci);
oi += 2;
vaccr = _mm_movehl_ps(vaccr, vaccr);
vacci = _mm_movehl_ps(vacci, vacci);
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(or, vaccr);
_mm_store_ss(oi, vacci);
}
}
}
| 3,834 | 28.728682 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-sse-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__sse_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m128 va0r = _mm_loadu_ps(ar);
const __m128 va0i = _mm_loadu_ps(ai);
const __m128 vb0r = _mm_loadu_ps(br);
const __m128 vb0i = _mm_loadu_ps(bi);
const __m128 va1r = _mm_loadu_ps(ar + 4);
const __m128 va1i = _mm_loadu_ps(ai + 4);
const __m128 vb1r = _mm_loadu_ps(br + 4);
const __m128 vb1i = _mm_loadu_ps(bi + 4);
const __m128 va2r = _mm_loadu_ps(ar + 8);
const __m128 va2i = _mm_loadu_ps(ai + 8);
const __m128 vb2r = _mm_loadu_ps(br + 8);
const __m128 vb2i = _mm_loadu_ps(bi + 8);
const __m128 va3r = _mm_loadu_ps(ar + 12);
const __m128 va3i = _mm_loadu_ps(ai + 12);
const __m128 vb3r = _mm_loadu_ps(br + 12);
const __m128 vb3i = _mm_loadu_ps(bi + 12);
ar += 16;
ai += 16;
br += 16;
bi += 16;
__m128 vacc0r = _mm_mul_ps(va0r, vb0r);
__m128 vacc0i = _mm_mul_ps(va0r, vb0i);
__m128 vacc1r = _mm_mul_ps(va1r, vb1r);
__m128 vacc1i = _mm_mul_ps(va1r, vb1i);
__m128 vacc2r = _mm_mul_ps(va2r, vb2r);
__m128 vacc2i = _mm_mul_ps(va2r, vb2i);
__m128 vacc3r = _mm_mul_ps(va3r, vb3r);
__m128 vacc3i = _mm_mul_ps(va3r, vb3i);
vacc0r = _mm_sub_ps(vacc0r, _mm_mul_ps(va0i, vb0i));
vacc0i = _mm_add_ps(vacc0i, _mm_mul_ps(va0i, vb0r));
vacc1r = _mm_sub_ps(vacc1r, _mm_mul_ps(va1i, vb1i));
vacc1i = _mm_add_ps(vacc1i, _mm_mul_ps(va1i, vb1r));
vacc2r = _mm_sub_ps(vacc2r, _mm_mul_ps(va2i, vb2i));
vacc2i = _mm_add_ps(vacc2i, _mm_mul_ps(va2i, vb2r));
vacc3r = _mm_sub_ps(vacc3r, _mm_mul_ps(va3i, vb3i));
vacc3i = _mm_add_ps(vacc3i, _mm_mul_ps(va3i, vb3r));
_mm_storeu_ps(or, vacc0r);
_mm_storeu_ps(oi, vacc0i);
_mm_storeu_ps(or + 4, vacc1r);
_mm_storeu_ps(oi + 4, vacc1i);
_mm_storeu_ps(or + 8, vacc2r);
_mm_storeu_ps(oi + 8, vacc2i);
_mm_storeu_ps(or + 12, vacc3r);
_mm_storeu_ps(oi + 12, vacc3i);
or += 16;
oi += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 var = _mm_loadu_ps(ar);
ar += 4;
const __m128 vai = _mm_loadu_ps(ai);
ai += 4;
const __m128 vbr = _mm_loadu_ps(br);
br += 4;
const __m128 vbi = _mm_loadu_ps(bi);
bi += 4;
__m128 vaccr = _mm_mul_ps(var, vbr);
__m128 vacci = _mm_mul_ps(var, vbi);
vaccr = _mm_sub_ps(vaccr, _mm_mul_ps(vai, vbi));
vacci = _mm_add_ps(vacci, _mm_mul_ps(vai, vbr));
_mm_storeu_ps(or, vaccr);
or += 4;
_mm_storeu_ps(oi, vacci);
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 var = _mm_loadu_ps(ar);
ar += 4;
const __m128 vai = _mm_loadu_ps(ai);
ai += 4;
const __m128 vbr = _mm_loadu_ps(br);
br += 4;
const __m128 vbi = _mm_loadu_ps(bi);
bi += 4;
__m128 vaccr = _mm_mul_ps(var, vbr);
__m128 vacci = _mm_mul_ps(var, vbi);
vaccr = _mm_sub_ps(vaccr, _mm_mul_ps(vai, vbi));
vacci = _mm_add_ps(vacci, _mm_mul_ps(vai, vbr));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) or, vaccr);
or += 2;
_mm_storel_pi((__m64*) oi, vacci);
oi += 2;
vaccr = _mm_movehl_ps(vaccr, vaccr);
vacci = _mm_movehl_ps(vacci, vacci);
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(or, vaccr);
_mm_store_ss(oi, vacci);
}
}
}
| 4,296 | 29.913669 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__sse_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 var = _mm_loadu_ps(ar);
ar += 4;
const __m128 vai = _mm_loadu_ps(ai);
ai += 4;
const __m128 vbr = _mm_loadu_ps(br);
br += 4;
const __m128 vbi = _mm_loadu_ps(bi);
bi += 4;
__m128 vaccr = _mm_mul_ps(var, vbr);
__m128 vacci = _mm_mul_ps(var, vbi);
vaccr = _mm_sub_ps(vaccr, _mm_mul_ps(vai, vbi));
vacci = _mm_add_ps(vacci, _mm_mul_ps(vai, vbr));
_mm_storeu_ps(or, vaccr);
or += 4;
_mm_storeu_ps(oi, vacci);
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 var = _mm_loadu_ps(ar);
ar += 4;
const __m128 vai = _mm_loadu_ps(ai);
ai += 4;
const __m128 vbr = _mm_loadu_ps(br);
br += 4;
const __m128 vbi = _mm_loadu_ps(bi);
bi += 4;
__m128 vaccr = _mm_mul_ps(var, vbr);
__m128 vacci = _mm_mul_ps(var, vbi);
vaccr = _mm_sub_ps(vaccr, _mm_mul_ps(vai, vbi));
vacci = _mm_add_ps(vacci, _mm_mul_ps(vai, vbr));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) or, vaccr);
or += 2;
_mm_storel_pi((__m64*) oi, vacci);
oi += 2;
vaccr = _mm_movehl_ps(vaccr, vaccr);
vacci = _mm_movehl_ps(vacci, vacci);
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(or, vaccr);
_mm_store_ss(oi, vacci);
}
}
}
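
// Illustrative usage sketch, not part of the generated kernel: the helper name
// and the values below are hypothetical. Inputs and output use the planar
// layout expected above (n real components followed by n imaginary components),
// and batch is n * sizeof(float), i.e. the size in bytes of one plane. The
// params argument is never dereferenced by this kernel, so a null pointer is
// passed purely for illustration.
static void example_xnn_f32_vcmul_sse_x4(void) {
  const float a[8] = {1.0f, 2.0f, 3.0f, 4.0f,   // real(a)
                      0.5f, 0.5f, 0.5f, 0.5f};  // imag(a)
  const float b[8] = {2.0f, 2.0f, 2.0f, 2.0f,   // real(b)
                      1.0f, 1.0f, 1.0f, 1.0f};  // imag(b)
  float y[8];  // y[0..3] = real(a*b), y[4..7] = imag(a*b)
  xnn_f32_vcmul_ukernel__sse_x4(4 * sizeof(float), a, b, y, NULL);
}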
| 2,329 | 25.477273 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__sse_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 va0r = _mm_loadu_ps(ar);
const __m128 va0i = _mm_loadu_ps(ai);
const __m128 vb0r = _mm_loadu_ps(br);
const __m128 vb0i = _mm_loadu_ps(bi);
const __m128 va1r = _mm_loadu_ps(ar + 4);
const __m128 va1i = _mm_loadu_ps(ai + 4);
const __m128 vb1r = _mm_loadu_ps(br + 4);
const __m128 vb1i = _mm_loadu_ps(bi + 4);
ar += 8;
ai += 8;
br += 8;
bi += 8;
__m128 vacc0r = _mm_mul_ps(va0r, vb0r);
__m128 vacc0i = _mm_mul_ps(va0r, vb0i);
__m128 vacc1r = _mm_mul_ps(va1r, vb1r);
__m128 vacc1i = _mm_mul_ps(va1r, vb1i);
vacc0r = _mm_sub_ps(vacc0r, _mm_mul_ps(va0i, vb0i));
vacc0i = _mm_add_ps(vacc0i, _mm_mul_ps(va0i, vb0r));
vacc1r = _mm_sub_ps(vacc1r, _mm_mul_ps(va1i, vb1i));
vacc1i = _mm_add_ps(vacc1i, _mm_mul_ps(va1i, vb1r));
_mm_storeu_ps(or, vacc0r);
_mm_storeu_ps(oi, vacc0i);
_mm_storeu_ps(or + 4, vacc1r);
_mm_storeu_ps(oi + 4, vacc1i);
or += 8;
oi += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 var = _mm_loadu_ps(ar);
ar += 4;
const __m128 vai = _mm_loadu_ps(ai);
ai += 4;
const __m128 vbr = _mm_loadu_ps(br);
br += 4;
const __m128 vbi = _mm_loadu_ps(bi);
bi += 4;
__m128 vaccr = _mm_mul_ps(var, vbr);
__m128 vacci = _mm_mul_ps(var, vbi);
vaccr = _mm_sub_ps(vaccr, _mm_mul_ps(vai, vbi));
vacci = _mm_add_ps(vacci, _mm_mul_ps(vai, vbr));
_mm_storeu_ps(or, vaccr);
or += 4;
_mm_storeu_ps(oi, vacci);
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 var = _mm_loadu_ps(ar);
ar += 4;
const __m128 vai = _mm_loadu_ps(ai);
ai += 4;
const __m128 vbr = _mm_loadu_ps(br);
br += 4;
const __m128 vbi = _mm_loadu_ps(bi);
bi += 4;
__m128 vaccr = _mm_mul_ps(var, vbr);
__m128 vacci = _mm_mul_ps(var, vbi);
vaccr = _mm_sub_ps(vaccr, _mm_mul_ps(vai, vbi));
vacci = _mm_add_ps(vacci, _mm_mul_ps(vai, vbr));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) or, vaccr);
or += 2;
_mm_storel_pi((__m64*) oi, vacci);
oi += 2;
vaccr = _mm_movehl_ps(vaccr, vaccr);
vacci = _mm_movehl_ps(vacci, vacci);
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(or, vaccr);
_mm_store_ss(oi, vacci);
}
}
}
| 3,369 | 27.319328 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-wasmsimd-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__wasmsimd_x12(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
const v128_t va0r = wasm_v128_load(ar);
const v128_t va0i = wasm_v128_load(ai);
const v128_t vb0r = wasm_v128_load(br);
const v128_t vb0i = wasm_v128_load(bi);
const v128_t va1r = wasm_v128_load(ar + 4);
const v128_t va1i = wasm_v128_load(ai + 4);
const v128_t vb1r = wasm_v128_load(br + 4);
const v128_t vb1i = wasm_v128_load(bi + 4);
const v128_t va2r = wasm_v128_load(ar + 8);
const v128_t va2i = wasm_v128_load(ai + 8);
const v128_t vb2r = wasm_v128_load(br + 8);
const v128_t vb2i = wasm_v128_load(bi + 8);
ar += 12;
ai += 12;
br += 12;
bi += 12;
v128_t vacc0r = wasm_f32x4_mul(va0r, vb0r);
v128_t vacc0i = wasm_f32x4_mul(va0r, vb0i);
v128_t vacc1r = wasm_f32x4_mul(va1r, vb1r);
v128_t vacc1i = wasm_f32x4_mul(va1r, vb1i);
v128_t vacc2r = wasm_f32x4_mul(va2r, vb2r);
v128_t vacc2i = wasm_f32x4_mul(va2r, vb2i);
vacc0r = wasm_f32x4_sub(vacc0r, wasm_f32x4_mul(va0i, vb0i));
vacc0i = wasm_f32x4_add(vacc0i, wasm_f32x4_mul(va0i, vb0r));
vacc1r = wasm_f32x4_sub(vacc1r, wasm_f32x4_mul(va1i, vb1i));
vacc1i = wasm_f32x4_add(vacc1i, wasm_f32x4_mul(va1i, vb1r));
vacc2r = wasm_f32x4_sub(vacc2r, wasm_f32x4_mul(va2i, vb2i));
vacc2i = wasm_f32x4_add(vacc2i, wasm_f32x4_mul(va2i, vb2r));
wasm_v128_store(or, vacc0r);
wasm_v128_store(oi, vacc0i);
wasm_v128_store(or + 4, vacc1r);
wasm_v128_store(oi + 4, vacc1i);
wasm_v128_store(or + 8, vacc2r);
wasm_v128_store(oi + 8, vacc2i);
or += 12;
oi += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t var = wasm_v128_load(ar);
ar += 4;
const v128_t vai = wasm_v128_load(ai);
ai += 4;
const v128_t vbr = wasm_v128_load(br);
br += 4;
const v128_t vbi = wasm_v128_load(bi);
bi += 4;
v128_t vaccr = wasm_f32x4_mul(var, vbr);
v128_t vacci = wasm_f32x4_mul(var, vbi);
vaccr = wasm_f32x4_sub(vaccr, wasm_f32x4_mul(vai, vbi));
vacci = wasm_f32x4_add(vacci, wasm_f32x4_mul(vai, vbr));
wasm_v128_store(or, vaccr);
or += 4;
wasm_v128_store(oi, vacci);
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t var = wasm_v128_load(ar);
ar += 4;
const v128_t vai = wasm_v128_load(ai);
ai += 4;
const v128_t vbr = wasm_v128_load(br);
br += 4;
const v128_t vbi = wasm_v128_load(bi);
bi += 4;
v128_t vaccr = wasm_f32x4_mul(var, vbr);
v128_t vacci = wasm_f32x4_mul(var, vbi);
vaccr = wasm_f32x4_sub(vaccr, wasm_f32x4_mul(vai, vbi));
vacci = wasm_f32x4_add(vacci, wasm_f32x4_mul(vai, vbr));
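    // Store the 1-3 remaining complex elements lane by lane: each 64-bit lane
    // store writes two floats, the i64x2 shuffle moves the upper half down, and
    // the final 32-bit lane store writes the last float when an odd number of
    // elements remains.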
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(or, vaccr, 0);
or += 2;
wasm_v128_store64_lane(oi, vacci, 0);
oi += 2;
vaccr = wasm_v64x2_shuffle(vaccr, vaccr, 1, 1);
vacci = wasm_v64x2_shuffle(vacci, vacci, 1, 1);
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(or, vaccr, 0);
wasm_v128_store32_lane(oi, vacci, 0);
}
}
}
| 4,077 | 30.612403 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__wasmsimd_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0r = wasm_v128_load(ar);
const v128_t va0i = wasm_v128_load(ai);
const v128_t vb0r = wasm_v128_load(br);
const v128_t vb0i = wasm_v128_load(bi);
const v128_t va1r = wasm_v128_load(ar + 4);
const v128_t va1i = wasm_v128_load(ai + 4);
const v128_t vb1r = wasm_v128_load(br + 4);
const v128_t vb1i = wasm_v128_load(bi + 4);
const v128_t va2r = wasm_v128_load(ar + 8);
const v128_t va2i = wasm_v128_load(ai + 8);
const v128_t vb2r = wasm_v128_load(br + 8);
const v128_t vb2i = wasm_v128_load(bi + 8);
const v128_t va3r = wasm_v128_load(ar + 12);
const v128_t va3i = wasm_v128_load(ai + 12);
const v128_t vb3r = wasm_v128_load(br + 12);
const v128_t vb3i = wasm_v128_load(bi + 12);
ar += 16;
ai += 16;
br += 16;
bi += 16;
v128_t vacc0r = wasm_f32x4_mul(va0r, vb0r);
v128_t vacc0i = wasm_f32x4_mul(va0r, vb0i);
v128_t vacc1r = wasm_f32x4_mul(va1r, vb1r);
v128_t vacc1i = wasm_f32x4_mul(va1r, vb1i);
v128_t vacc2r = wasm_f32x4_mul(va2r, vb2r);
v128_t vacc2i = wasm_f32x4_mul(va2r, vb2i);
v128_t vacc3r = wasm_f32x4_mul(va3r, vb3r);
v128_t vacc3i = wasm_f32x4_mul(va3r, vb3i);
vacc0r = wasm_f32x4_sub(vacc0r, wasm_f32x4_mul(va0i, vb0i));
vacc0i = wasm_f32x4_add(vacc0i, wasm_f32x4_mul(va0i, vb0r));
vacc1r = wasm_f32x4_sub(vacc1r, wasm_f32x4_mul(va1i, vb1i));
vacc1i = wasm_f32x4_add(vacc1i, wasm_f32x4_mul(va1i, vb1r));
vacc2r = wasm_f32x4_sub(vacc2r, wasm_f32x4_mul(va2i, vb2i));
vacc2i = wasm_f32x4_add(vacc2i, wasm_f32x4_mul(va2i, vb2r));
vacc3r = wasm_f32x4_sub(vacc3r, wasm_f32x4_mul(va3i, vb3i));
vacc3i = wasm_f32x4_add(vacc3i, wasm_f32x4_mul(va3i, vb3r));
wasm_v128_store(or, vacc0r);
wasm_v128_store(oi, vacc0i);
wasm_v128_store(or + 4, vacc1r);
wasm_v128_store(oi + 4, vacc1i);
wasm_v128_store(or + 8, vacc2r);
wasm_v128_store(oi + 8, vacc2i);
wasm_v128_store(or + 12, vacc3r);
wasm_v128_store(oi + 12, vacc3i);
or += 16;
oi += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t var = wasm_v128_load(ar);
ar += 4;
const v128_t vai = wasm_v128_load(ai);
ai += 4;
const v128_t vbr = wasm_v128_load(br);
br += 4;
const v128_t vbi = wasm_v128_load(bi);
bi += 4;
v128_t vaccr = wasm_f32x4_mul(var, vbr);
v128_t vacci = wasm_f32x4_mul(var, vbi);
vaccr = wasm_f32x4_sub(vaccr, wasm_f32x4_mul(vai, vbi));
vacci = wasm_f32x4_add(vacci, wasm_f32x4_mul(vai, vbr));
wasm_v128_store(or, vaccr);
or += 4;
wasm_v128_store(oi, vacci);
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t var = wasm_v128_load(ar);
ar += 4;
const v128_t vai = wasm_v128_load(ai);
ai += 4;
const v128_t vbr = wasm_v128_load(br);
br += 4;
const v128_t vbi = wasm_v128_load(bi);
bi += 4;
v128_t vaccr = wasm_f32x4_mul(var, vbr);
v128_t vacci = wasm_f32x4_mul(var, vbi);
vaccr = wasm_f32x4_sub(vaccr, wasm_f32x4_mul(vai, vbi));
vacci = wasm_f32x4_add(vacci, wasm_f32x4_mul(vai, vbr));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(or, vaccr, 0);
or += 2;
wasm_v128_store64_lane(oi, vacci, 0);
oi += 2;
vaccr = wasm_v64x2_shuffle(vaccr, vaccr, 1, 1);
vacci = wasm_v64x2_shuffle(vacci, vacci, 1, 1);
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(or, vaccr, 0);
wasm_v128_store32_lane(oi, vacci, 0);
}
}
}
| 4,575 | 31.920863 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__wasmsimd_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t var = wasm_v128_load(ar);
ar += 4;
const v128_t vai = wasm_v128_load(ai);
ai += 4;
const v128_t vbr = wasm_v128_load(br);
br += 4;
const v128_t vbi = wasm_v128_load(bi);
bi += 4;
v128_t vaccr = wasm_f32x4_mul(var, vbr);
v128_t vacci = wasm_f32x4_mul(var, vbi);
vaccr = wasm_f32x4_sub(vaccr, wasm_f32x4_mul(vai, vbi));
vacci = wasm_f32x4_add(vacci, wasm_f32x4_mul(vai, vbr));
wasm_v128_store(or, vaccr);
or += 4;
wasm_v128_store(oi, vacci);
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t var = wasm_v128_load(ar);
ar += 4;
const v128_t vai = wasm_v128_load(ai);
ai += 4;
const v128_t vbr = wasm_v128_load(br);
br += 4;
const v128_t vbi = wasm_v128_load(bi);
bi += 4;
v128_t vaccr = wasm_f32x4_mul(var, vbr);
v128_t vacci = wasm_f32x4_mul(var, vbi);
vaccr = wasm_f32x4_sub(vaccr, wasm_f32x4_mul(vai, vbi));
vacci = wasm_f32x4_add(vacci, wasm_f32x4_mul(vai, vbr));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(or, vaccr, 0);
or += 2;
wasm_v128_store64_lane(oi, vacci, 0);
oi += 2;
vaccr = wasm_v64x2_shuffle(vaccr, vaccr, 1, 1);
vacci = wasm_v64x2_shuffle(vacci, vacci, 1, 1);
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(or, vaccr, 0);
wasm_v128_store32_lane(oi, vacci, 0);
}
}
}
| 2,464 | 27.011364 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vcmul/gen/f32-vcmul-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vcmul/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vcmul_ukernel__wasmsimd_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params* params) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float* ar = input_a;
const float* ai = (const float*) ((uintptr_t) input_a + batch);
const float* br = input_b;
const float* bi = (const float*) ((uintptr_t) input_b + batch);
float* or = output;
float* oi = (float*) ((uintptr_t) output + batch);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0r = wasm_v128_load(ar);
const v128_t va0i = wasm_v128_load(ai);
const v128_t vb0r = wasm_v128_load(br);
const v128_t vb0i = wasm_v128_load(bi);
const v128_t va1r = wasm_v128_load(ar + 4);
const v128_t va1i = wasm_v128_load(ai + 4);
const v128_t vb1r = wasm_v128_load(br + 4);
const v128_t vb1i = wasm_v128_load(bi + 4);
ar += 8;
ai += 8;
br += 8;
bi += 8;
v128_t vacc0r = wasm_f32x4_mul(va0r, vb0r);
v128_t vacc0i = wasm_f32x4_mul(va0r, vb0i);
v128_t vacc1r = wasm_f32x4_mul(va1r, vb1r);
v128_t vacc1i = wasm_f32x4_mul(va1r, vb1i);
vacc0r = wasm_f32x4_sub(vacc0r, wasm_f32x4_mul(va0i, vb0i));
vacc0i = wasm_f32x4_add(vacc0i, wasm_f32x4_mul(va0i, vb0r));
vacc1r = wasm_f32x4_sub(vacc1r, wasm_f32x4_mul(va1i, vb1i));
vacc1i = wasm_f32x4_add(vacc1i, wasm_f32x4_mul(va1i, vb1r));
wasm_v128_store(or, vacc0r);
wasm_v128_store(oi, vacc0i);
wasm_v128_store(or + 4, vacc1r);
wasm_v128_store(oi + 4, vacc1i);
or += 8;
oi += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t var = wasm_v128_load(ar);
ar += 4;
const v128_t vai = wasm_v128_load(ai);
ai += 4;
const v128_t vbr = wasm_v128_load(br);
br += 4;
const v128_t vbi = wasm_v128_load(bi);
bi += 4;
v128_t vaccr = wasm_f32x4_mul(var, vbr);
v128_t vacci = wasm_f32x4_mul(var, vbi);
vaccr = wasm_f32x4_sub(vaccr, wasm_f32x4_mul(vai, vbi));
vacci = wasm_f32x4_add(vacci, wasm_f32x4_mul(vai, vbr));
wasm_v128_store(or, vaccr);
or += 4;
wasm_v128_store(oi, vacci);
oi += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t var = wasm_v128_load(ar);
ar += 4;
const v128_t vai = wasm_v128_load(ai);
ai += 4;
const v128_t vbr = wasm_v128_load(br);
br += 4;
const v128_t vbi = wasm_v128_load(bi);
bi += 4;
v128_t vaccr = wasm_f32x4_mul(var, vbr);
v128_t vacci = wasm_f32x4_mul(var, vbi);
vaccr = wasm_f32x4_sub(vaccr, wasm_f32x4_mul(vai, vbi));
vacci = wasm_f32x4_add(vacci, wasm_f32x4_mul(vai, vbr));
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(or, vaccr, 0);
or += 2;
wasm_v128_store64_lane(oi, vacci, 0);
oi += 2;
vaccr = wasm_v64x2_shuffle(vaccr, vaccr, 1, 1);
vacci = wasm_v64x2_shuffle(vacci, vacci, 1, 1);
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(or, vaccr, 0);
wasm_v128_store32_lane(oi, vacci, 0);
}
}
}
| 3,576 | 29.058824 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-lut4-p4-perm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_lut4_p4.log2e);
const __m256 vindex_mask = _mm256_load_ps((const float*) params->avx_rr2_lut4_p4.index_mask);
const __m256 vtable = _mm256_load_ps(params->avx_rr2_lut4_p4.table);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_lo);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_lut4_p4.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_lut4_p4.one);
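  // Per element: y = beta * x when beta * x is non-negative, otherwise
  // y = alpha * (exp(prescale * x) - 1). exp() is approximated with a 4-entry
  // lookup table plus a degree-4 polynomial after a two-term (hi/lo) ln(2)
  // range reduction; the scaled input is clamped from below at sat_cutoff,
  // where the negative branch has already saturated.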
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
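    // The low two bits of each vn (viewed as integer bits) index the 4-entry
    // table via permutevar; the remaining bits, isolated in ven*, are shifted
    // left by 21 so they land in the float exponent field. The shift is done per
    // 128-bit half because AVX1 has no 256-bit integer shifts, and the result is
    // applied by multiplying the table value vl* by ven*.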
__m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
__m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
__m256 vs0 = _mm256_mul_ps(vl0, ven0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
__m256 vs1 = _mm256_mul_ps(vl1, ven1);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
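    // blendv picks the exponential branch ve* for lanes whose sign bit is set in
    // vx* (already scaled by beta) and the linear beta * x branch otherwise.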
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
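    // 1-7 elements remain: build a per-lane mask from the mask table so the
    // masked load touches only valid elements; the partial result is written
    // back 4, 2 and 1 floats at a time below.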
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 7,859 | 40.808511 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-lut4-p4-perm-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_lut4_p4.log2e);
const __m256 vindex_mask = _mm256_load_ps((const float*) params->avx_rr2_lut4_p4.index_mask);
const __m256 vtable = _mm256_load_ps(params->avx_rr2_lut4_p4.table);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_lo);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_lut4_p4.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_lut4_p4.one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
__m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
__m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);
const __m256 vl2 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2));
const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
__m256 vs0 = _mm256_mul_ps(vl0, ven0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
__m256 vs1 = _mm256_mul_ps(vl1, ven1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
__m256 vs2 = _mm256_mul_ps(vl2, ven2);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc4, vt2), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vt2 = _mm256_mul_ps(vt2, vs2);
vs2 = _mm256_sub_ps(vs2, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 9,204 | 42.833333 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-lut4-p4-perm-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_lut4_p4.log2e);
const __m256 vindex_mask = _mm256_load_ps((const float*) params->avx_rr2_lut4_p4.index_mask);
const __m256 vtable = _mm256_load_ps(params->avx_rr2_lut4_p4.table);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_lo);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_lut4_p4.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_lut4_p4.one);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
__m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
__m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);
const __m256 vl2 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2));
const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
__m256 ven3 = _mm256_andnot_ps(vindex_mask, vn3);
const __m256 vl3 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3));
const __m128 ven3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven3)), 21));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m128 ven3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven3, 1)), 21));
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
ven3 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven3_lo), ven3_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
__m256 vs0 = _mm256_mul_ps(vl0, ven0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
__m256 vs1 = _mm256_mul_ps(vl1, ven1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
__m256 vs2 = _mm256_mul_ps(vl2, ven2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
__m256 vs3 = _mm256_mul_ps(vl3, ven3);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc4, vt2), vc3);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc4, vt3), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vp2 = _mm256_mul_ps(vp2, vt2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vt2 = _mm256_mul_ps(vt2, vs2);
vs2 = _mm256_sub_ps(vs2, vone);
vt3 = _mm256_mul_ps(vt3, vs3);
vs3 = _mm256_sub_ps(vs3, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,549 | 44.474138 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-lut4-p4-perm-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_lut4_p4.log2e);
const __m256 vindex_mask = _mm256_load_ps((const float*) params->avx_rr2_lut4_p4.index_mask);
const __m256 vtable = _mm256_load_ps(params->avx_rr2_lut4_p4.table);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_lo);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_lut4_p4.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_lut4_p4.one);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
__m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
__m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);
const __m256 vl2 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2));
const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
__m256 ven3 = _mm256_andnot_ps(vindex_mask, vn3);
const __m256 vl3 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3));
const __m128 ven3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven3)), 21));
__m256 ven4 = _mm256_andnot_ps(vindex_mask, vn4);
const __m256 vl4 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4));
const __m128 ven4_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven4)), 21));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m128 ven3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven3, 1)), 21));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m128 ven4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven4, 1)), 21));
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
ven3 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven3_lo), ven3_hi, 1);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
ven4 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven4_lo), ven4_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
__m256 vs0 = _mm256_mul_ps(vl0, ven0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
__m256 vs1 = _mm256_mul_ps(vl1, ven1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
__m256 vs2 = _mm256_mul_ps(vl2, ven2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
__m256 vs3 = _mm256_mul_ps(vl3, ven3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
__m256 vs4 = _mm256_mul_ps(vl4, ven4);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc4, vt2), vc3);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc4, vt3), vc3);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc4, vt4), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vp2 = _mm256_mul_ps(vp2, vt2);
vp3 = _mm256_mul_ps(vp3, vt3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vt2 = _mm256_mul_ps(vt2, vs2);
vs2 = _mm256_sub_ps(vs2, vone);
vt3 = _mm256_mul_ps(vt3, vs3);
vs3 = _mm256_sub_ps(vs3, vone);
vt4 = _mm256_mul_ps(vt4, vs4);
vs4 = _mm256_sub_ps(vs4, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
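  // Note: the unrolled loop above consumes 40 floats (five __m256 vectors) per iteration;
  // the loop below applies the same computation one 8-float vector at a time until fewer
  // than 8 elements remain.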
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
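  // Remainder of 1-7 floats: the valid lanes are loaded with a mask taken from mask_table,
  // the full-vector computation is reused, and the result is stored in 4/2/1-float pieces.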
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,894 | 45.830709 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-lut4-p4-perm-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_lut4_p4.log2e);
const __m256 vindex_mask = _mm256_load_ps((const float*) params->avx_rr2_lut4_p4.index_mask);
const __m256 vtable = _mm256_load_ps(params->avx_rr2_lut4_p4.table);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_lo);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_lut4_p4.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_lut4_p4.one);
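  // exp(z) is reconstructed as 2**n with n = z*log2(e) rounded to a multiple of 1/4 via the
  // magic-bias trick: the two low bits of the biased value index a 4-entry table of 2**(i/4)
  // values (vl, via _mm256_permutevar_ps), and the remaining integer part (ven) is shifted into
  // the floating-point exponent field. AVX1 has no 256-bit integer shift, so the shift is done
  // on the two 128-bit halves and the halves are re-joined with _mm256_insertf128_ps.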
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
__m256 ven0 = _mm256_andnot_ps(vindex_mask, vn0);
const __m256 vl0 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0));
const __m128 ven0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven0)), 21));
__m256 ven1 = _mm256_andnot_ps(vindex_mask, vn1);
const __m256 vl1 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1));
const __m128 ven1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven1)), 21));
__m256 ven2 = _mm256_andnot_ps(vindex_mask, vn2);
const __m256 vl2 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2));
const __m128 ven2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven2)), 21));
__m256 ven3 = _mm256_andnot_ps(vindex_mask, vn3);
const __m256 vl3 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3));
const __m128 ven3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven3)), 21));
__m256 ven4 = _mm256_andnot_ps(vindex_mask, vn4);
const __m256 vl4 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4));
const __m128 ven4_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven4)), 21));
__m256 ven5 = _mm256_andnot_ps(vindex_mask, vn5);
const __m256 vl5 = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5));
const __m128 ven5_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven5)), 21));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 ven0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven0, 1)), 21));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 ven1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven1, 1)), 21));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m128 ven2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven2, 1)), 21));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m128 ven3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven3, 1)), 21));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m128 ven4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven4, 1)), 21));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m128 ven5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven5, 1)), 21));
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
ven0 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven0_lo), ven0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
ven1 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven1_lo), ven1_hi, 1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
ven2 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven2_lo), ven2_hi, 1);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
ven3 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven3_lo), ven3_hi, 1);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
ven4 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven4_lo), ven4_hi, 1);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
ven5 = _mm256_insertf128_ps(_mm256_castps128_ps256(ven5_lo), ven5_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
__m256 vs0 = _mm256_mul_ps(vl0, ven0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
__m256 vs1 = _mm256_mul_ps(vl1, ven1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
__m256 vs2 = _mm256_mul_ps(vl2, ven2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
__m256 vs3 = _mm256_mul_ps(vl3, ven3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
__m256 vs4 = _mm256_mul_ps(vl4, ven4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
__m256 vs5 = _mm256_mul_ps(vl5, ven5);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc4, vt0), vc3);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc4, vt1), vc3);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc4, vt2), vc3);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc4, vt3), vc3);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc4, vt4), vc3);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc4, vt5), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vp2 = _mm256_mul_ps(vp2, vt2);
vp3 = _mm256_mul_ps(vp3, vt3);
vp4 = _mm256_mul_ps(vp4, vt4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vt2 = _mm256_mul_ps(vt2, vs2);
vs2 = _mm256_sub_ps(vs2, vone);
vt3 = _mm256_mul_ps(vt3, vs3);
vs3 = _mm256_sub_ps(vs3, vone);
vt4 = _mm256_mul_ps(vt4, vs4);
vs4 = _mm256_sub_ps(vs4, vone);
vt5 = _mm256_mul_ps(vt5, vs5);
vs5 = _mm256_sub_ps(vs5, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vt5);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_mul_ps(_mm256_add_ps(vp5, vs5), valpha);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 13,239 | 46.971014 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-lut4-p4-perm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_lut4_p4_perm_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_lut4_p4.log2e);
const __m256 vindex_mask = _mm256_load_ps((const float*) params->avx_rr2_lut4_p4.index_mask);
const __m256 vtable = _mm256_load_ps(params->avx_rr2_lut4_p4.table);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_lut4_p4.minus_ln2_lo);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_lut4_p4.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_lut4_p4.one);
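  // Single-vector variant: 8 floats per iteration. The reduced argument t = z - n*ln(2) is
  // computed with two constants (vminus_ln2_hi / vminus_ln2_lo), splitting ln(2) into a high
  // part whose product with n is exact and a low correction term (Cody-Waite style reduction).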
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
__m256 ven = _mm256_andnot_ps(vindex_mask, vn);
const __m256 vl = _mm256_permutevar_ps(vtable, _mm256_castps_si256(vn));
const __m128 ven_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(ven)), 21));
vn = _mm256_sub_ps(vn, vmagic_bias);
const __m128 ven_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(ven, 1)), 21));
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
ven = _mm256_insertf128_ps(_mm256_castps128_ps256(ven_lo), ven_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vs = _mm256_mul_ps(vl, ven);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc4, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 5,058 | 39.150794 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-p6-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p6.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_lo);
const __m256 vc6 = _mm256_load_ps(params->avx_rr2_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p6.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p6.one);
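  // Polynomial (degree-6) variant: n = round(z*log2(e)) is obtained with the magic-bias trick,
  // and 2**n is built by shifting the low bits of the biased value into the exponent field
  // (left shift by 23). The shift runs on 128-bit halves because AVX1 lacks 256-bit integer ops.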
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 7,471 | 39.389189 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-p6-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p6.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_lo);
const __m256 vc6 = _mm256_load_ps(params->avx_rr2_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p6.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p6.one);
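  // Reassembly of alpha*(exp(z)-1): with s = 2**n and exp(t) ~= 1 + t + t^2*P(t), the loop body
  // forms t*s, s-1 and P(t)*t*(t*s) + (t*s), so that (vp + vs)*alpha equals alpha*(s*exp(t) - 1)
  // = alpha*(exp(z) - 1) without the cancellation that computing exp(z) and subtracting 1 would cause.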
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 vs2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc6, vt2), vc5);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vt2 = _mm256_mul_ps(vt2, vs2);
vs2 = _mm256_sub_ps(vs2, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,750 | 41.480583 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-p6-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_p6_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p6.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_lo);
const __m256 vc6 = _mm256_load_ps(params->avx_rr2_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p6.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p6.one);
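  // Final selection: _mm256_blendv_ps picks its second operand for lanes whose mask has the
  // sign bit set, so blendv(vx*beta, ve, vx) yields alpha*(exp(z)-1) for negative lanes and
  // beta*x for the others.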
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 vs2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m128 vs3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc6, vt2), vc5);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc6, vt3), vc5);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc4);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vp2 = _mm256_mul_ps(vp2, vt2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vt2 = _mm256_mul_ps(vt2, vs2);
vs2 = _mm256_sub_ps(vs2, vone);
vt3 = _mm256_mul_ps(vt3, vs3);
vs3 = _mm256_sub_ps(vs3, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,029 | 43.185022 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-p6-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_p6_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p6.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_lo);
const __m256 vc6 = _mm256_load_ps(params->avx_rr2_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p6.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p6.one);
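  // The scaled input z = max(sat_cutoff, x*prescale) is clamped from below so that 2**n stays in
  // the normal range for very negative inputs; positive lanes are unaffected in the final output,
  // since the blend at the end of the loop body selects beta*x for them.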
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 vs2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m128 vs3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m128 vs4_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs4_lo), vs4_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc6, vt2), vc5);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc6, vt3), vc5);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc6, vt4), vc5);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc4);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc4);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vp2 = _mm256_mul_ps(vp2, vt2);
vp3 = _mm256_mul_ps(vp3, vt3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vt2 = _mm256_mul_ps(vt2, vs2);
vs2 = _mm256_sub_ps(vs2, vone);
vt3 = _mm256_mul_ps(vt3, vs3);
vs3 = _mm256_sub_ps(vs3, vone);
vt4 = _mm256_mul_ps(vt4, vs4);
vs4 = _mm256_sub_ps(vs4, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,308 | 44.600806 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-p6-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_p6_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p6.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_lo);
const __m256 vc6 = _mm256_load_ps(params->avx_rr2_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p6.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p6.one);
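  // The loop below evaluates ELU as alpha * (exp(z) - 1) for negative inputs and
  // beta * x otherwise. exp(z) is reconstructed as 2**n * exp(t), where
  // n = round(z * log2(e)) is obtained with the magic-bias trick and
  // t = z - n * ln(2) uses a two-constant (hi/lo) Cody-Waite reduction; a
  // degree-6 polynomial in t approximates exp(t). z itself is the prescaled
  // input clamped at sat_cutoff so very negative inputs saturate cleanly.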
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
__m256 vn0 = _mm256_add_ps(_mm256_mul_ps(vz0, vlog2e), vmagic_bias);
__m256 vn1 = _mm256_add_ps(_mm256_mul_ps(vz1, vlog2e), vmagic_bias);
__m256 vn2 = _mm256_add_ps(_mm256_mul_ps(vz2, vlog2e), vmagic_bias);
__m256 vn3 = _mm256_add_ps(_mm256_mul_ps(vz3, vlog2e), vmagic_bias);
__m256 vn4 = _mm256_add_ps(_mm256_mul_ps(vz4, vlog2e), vmagic_bias);
__m256 vn5 = _mm256_add_ps(_mm256_mul_ps(vz5, vlog2e), vmagic_bias);
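    // Reconstruct 2**n by shifting the integer bits left over from the
    // magic-bias addition into the float exponent field (<< 23). Plain AVX has
    // no 256-bit integer shift, so each 128-bit half is shifted separately and
    // the halves are rejoined with _mm256_insertf128_ps below.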
const __m128 vs0_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn0)), 23));
const __m128 vs0_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn0, 1)), 23));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m128 vs1_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn1)), 23));
const __m128 vs1_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn1, 1)), 23));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m128 vs2_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn2)), 23));
const __m128 vs2_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn2, 1)), 23));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m128 vs3_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn3)), 23));
const __m128 vs3_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn3, 1)), 23));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m128 vs4_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn4)), 23));
const __m128 vs4_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn4, 1)), 23));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m128 vs5_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn5)), 23));
const __m128 vs5_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn5, 1)), 23));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_hi), vz0);
__m256 vs0 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs0_lo), vs0_hi, 1);
__m256 vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_hi), vz1);
__m256 vs1 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs1_lo), vs1_hi, 1);
__m256 vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_hi), vz2);
__m256 vs2 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs2_lo), vs2_hi, 1);
__m256 vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_hi), vz3);
__m256 vs3 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs3_lo), vs3_hi, 1);
__m256 vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_hi), vz4);
__m256 vs4 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs4_lo), vs4_hi, 1);
__m256 vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_hi), vz5);
__m256 vs5 = _mm256_insertf128_ps(_mm256_castps128_ps256(vs5_lo), vs5_hi, 1);
vt0 = _mm256_add_ps(_mm256_mul_ps(vn0, vminus_ln2_lo), vt0);
vt1 = _mm256_add_ps(_mm256_mul_ps(vn1, vminus_ln2_lo), vt1);
vt2 = _mm256_add_ps(_mm256_mul_ps(vn2, vminus_ln2_lo), vt2);
vt3 = _mm256_add_ps(_mm256_mul_ps(vn3, vminus_ln2_lo), vt3);
vt4 = _mm256_add_ps(_mm256_mul_ps(vn4, vminus_ln2_lo), vt4);
vt5 = _mm256_add_ps(_mm256_mul_ps(vn5, vminus_ln2_lo), vt5);
__m256 vp0 = _mm256_add_ps(_mm256_mul_ps(vc6, vt0), vc5);
__m256 vp1 = _mm256_add_ps(_mm256_mul_ps(vc6, vt1), vc5);
__m256 vp2 = _mm256_add_ps(_mm256_mul_ps(vc6, vt2), vc5);
__m256 vp3 = _mm256_add_ps(_mm256_mul_ps(vc6, vt3), vc5);
__m256 vp4 = _mm256_add_ps(_mm256_mul_ps(vc6, vt4), vc5);
__m256 vp5 = _mm256_add_ps(_mm256_mul_ps(vc6, vt5), vc5);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc4);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc4);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc4);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc4);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc4);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc4);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc3);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc3);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc3);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc3);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc3);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vc2);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vc2);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vc2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vc2);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vc2);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vp1 = _mm256_mul_ps(vp1, vt1);
vp2 = _mm256_mul_ps(vp2, vt2);
vp3 = _mm256_mul_ps(vp3, vt3);
vp4 = _mm256_mul_ps(vp4, vt4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt0 = _mm256_mul_ps(vt0, vs0);
vs0 = _mm256_sub_ps(vs0, vone);
vt1 = _mm256_mul_ps(vt1, vs1);
vs1 = _mm256_sub_ps(vs1, vone);
vt2 = _mm256_mul_ps(vt2, vs2);
vs2 = _mm256_sub_ps(vs2, vone);
vt3 = _mm256_mul_ps(vt3, vs3);
vs3 = _mm256_sub_ps(vs3, vone);
vt4 = _mm256_mul_ps(vt4, vs4);
vs4 = _mm256_sub_ps(vs4, vone);
vt5 = _mm256_mul_ps(vt5, vs5);
vs5 = _mm256_sub_ps(vs5, vone);
vp0 = _mm256_add_ps(_mm256_mul_ps(vp0, vt0), vt0);
vp1 = _mm256_add_ps(_mm256_mul_ps(vp1, vt1), vt1);
vp2 = _mm256_add_ps(_mm256_mul_ps(vp2, vt2), vt2);
vp3 = _mm256_add_ps(_mm256_mul_ps(vp3, vt3), vt3);
vp4 = _mm256_add_ps(_mm256_mul_ps(vp4, vt4), vt4);
vp5 = _mm256_add_ps(_mm256_mul_ps(vp5, vt5), vt5);
const __m256 ve0 = _mm256_mul_ps(_mm256_add_ps(vp0, vs0), valpha);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_mul_ps(_mm256_add_ps(vp1, vs1), valpha);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_mul_ps(_mm256_add_ps(vp2, vs2), valpha);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_mul_ps(_mm256_add_ps(vp3, vs3), valpha);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_mul_ps(_mm256_add_ps(vp4, vs4), valpha);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_mul_ps(_mm256_add_ps(vp5, vs5), valpha);
vx5 = _mm256_mul_ps(vx5, vbeta);
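    // _mm256_blendv_ps selects ve (the exponential branch) for lanes whose sign
    // bit is set in vx; vx was just scaled by beta, so with a positive beta (the
    // usual ELU setup) negative inputs take alpha * (exp(z) - 1) and the rest
    // keep beta * x.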
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
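  // Remainder of 1..7 elements: a mask derived from mask_table loads only the
  // valid lanes, the same computation runs on a full vector, and the result is
  // stored back in 4-, 2- and 1-element pieces.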
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 12,587 | 45.795539 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx-rr2-p6-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx_rr2_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx_rr2_p6.prescale);
const __m256 valpha = _mm256_load_ps(params->avx_rr2_p6.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx_rr2_p6.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx_rr2_p6.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx_rr2_p6.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx_rr2_p6.log2e);
const __m256 vminus_ln2_hi = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_hi);
const __m256 vminus_ln2_lo = _mm256_load_ps(params->avx_rr2_p6.minus_ln2_lo);
const __m256 vc6 = _mm256_load_ps(params->avx_rr2_p6.c6);
const __m256 vc5 = _mm256_load_ps(params->avx_rr2_p6.c5);
const __m256 vc4 = _mm256_load_ps(params->avx_rr2_p6.c4);
const __m256 vc3 = _mm256_load_ps(params->avx_rr2_p6.c3);
const __m256 vc2 = _mm256_load_ps(params->avx_rr2_p6.c2);
const __m256 vone = _mm256_load_ps(params->avx_rr2_p6.one);
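  // Same algorithm as the wider variants above, specialized to a single 8-float
  // vector per loop iteration.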
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx_rr2_p6.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_add_ps(_mm256_mul_ps(vz, vlog2e), vmagic_bias);
const __m128 vs_lo = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
const __m128 vs_hi = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_hi), vz);
__m256 vs = _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
vt = _mm256_add_ps(_mm256_mul_ps(vn, vminus_ln2_lo), vt);
__m256 vp = _mm256_add_ps(_mm256_mul_ps(vc6, vt), vc5);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc4);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc3);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_sub_ps(vs, vone);
vp = _mm256_add_ps(_mm256_mul_ps(vp, vt), vt);
const __m256 ve = _mm256_mul_ps(_mm256_add_ps(vp, vs), valpha);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 4,801 | 37.725806 | 119 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
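  // This AVX2 variant rounds z * log2(e) to the nearest multiple of 1/16: the
  // low 4 bits of the fixed-point result (vindex_mask) gather an entry from the
  // 16-element 2**(-k/16) table, the remaining bits are shifted into the float
  // exponent field (<< 19), and a degree-3 polynomial in the reduced argument
  // t = z - n * ln(2) completes the exp(t) - 1 evaluation; FMA is used throughout.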
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
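    // vs = 2**n is assembled with a single integer add: vl holds the bit
    // pattern of 2**(-k/16) from the table and ven holds the integer part of n
    // shifted into the exponent field.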
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 6,361 | 36.64497 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
__m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 7,373 | 38.433155 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
__m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
__m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,385 | 39.907317 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
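  // Identical algorithm to the x16 variant above, unrolled to five 256-bit
  // vectors (40 floats) per main-loop iteration.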
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
__m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
__m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
__m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 9,397 | 41.143498 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
__m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
__m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
__m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
__m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,409 | 42.195021 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
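  // The kernels in this family evaluate an extended ELU:
  //   z = max(sat_cutoff, prescale * x)
  //   y = beta * x                for x > 0
  //   y = alpha * (exp(z) - 1)    for x <= 0
  // exp(z) is approximated by rounding z*log2(e) to a multiple of 1/16 with
  // the magic-bias trick, looking the fractional power up in a 16-entry exp2
  // table, and correcting with a degree-3 polynomial in the reduced argument.
  // This variant processes 56 floats (7 AVX2 vectors) per main-loop iteration;
  // the loops that follow handle the remainder 8 floats at a time, with a
  // masked tail for the final 1-7 elements.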
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask);
const __m256i vl6 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx6, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 19);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
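    // Reconstruct the scale factors vs ~= 2**n: the gathered table entries
    // provide the fractional-power bits, and the integer bits of vn, shifted
    // into the exponent field, are added on top before bit-casting back to
    // float.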
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
__m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
__m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
__m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
__m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
__m256 vp6 = _mm256_fmadd_ps(vc3, vt6, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
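    // The next block folds the scale into the polynomial: vs becomes
    // alpha*(s - 1) and vp becomes ~s*(exp(t) - 1), so the fused
    // multiply-adds below yield ve = alpha*(exp(z) - 1) directly.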
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
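    // _mm256_blendv_ps selects on the sign bit of its last operand: lanes
    // with a negative input take the ELU branch ve, non-negative lanes keep
    // beta*x.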
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
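  // Fewer than 8 floats remain: load them through a mask derived from the
  // remaining batch size so no out-of-bounds access occurs, reuse the same
  // computation, and store the results piecewise below.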
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
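    // Write the 1-7 remaining results without overrunning the output buffer:
    // 4 floats if present, then 2, then 1.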
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,421 | 43.100386 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask);
const __m256i vl6 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx6, sizeof(float));
const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask);
const __m256i vl7 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx7, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 19);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 19);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
__m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
__m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
__m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
__m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
__m256 vp6 = _mm256_fmadd_ps(vc3, vt6, vc2);
__m256 vp7 = _mm256_fmadd_ps(vc3, vt7, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 12,433 | 43.888087 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
__m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask);
const __m256i vl6 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx6, sizeof(float));
const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask);
const __m256i vl7 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx7, sizeof(float));
const __m256i vidx8 = _mm256_and_si256(_mm256_castps_si256(vn8), vindex_mask);
const __m256i vl8 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx8, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 19);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 19);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 19);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
__m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
__m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
__m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
__m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
__m256 vp6 = _mm256_fmadd_ps(vc3, vt6, vc2);
__m256 vp7 = _mm256_fmadd_ps(vc3, vt7, vc2);
__m256 vp8 = _mm256_fmadd_ps(vc3, vt8, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vp8 = _mm256_mul_ps(vp8, vt8);
vt8 = _mm256_mul_ps(vt8, vs8);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
vx8 = _mm256_mul_ps(vx8, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 13,445 | 44.579661 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
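  // x8 variant: no unrolled main loop; every iteration handles exactly one
  // 8-float AVX2 vector using the same table-plus-polynomial exp
  // approximation as the wider variants above.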
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
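// A minimal scalar sketch of what the kernels above compute, assuming the
// extended ELU form implied by the prescale/alpha/beta parameters. The helper
// below is illustrative only and is not part of the generated XNNPACK sources;
// the SIMD kernels additionally clamp prescale*x at sat_cutoff to keep the
// exp approximation in range, which leaves the rounded result unchanged.
#include <math.h>

static inline float f32_elu_reference(float x, float prescale, float alpha, float beta)
{
  // Positive inputs are scaled by beta; negative inputs follow
  // alpha * expm1(prescale * x), which the kernels approximate with a lookup
  // table and a short polynomial instead of calling expm1f().
  return x > 0.0f ? beta * x : alpha * expm1f(prescale * x);
}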
| 4,229 | 34.847458 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut16-p3-gather-x80.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut16-p3-gather.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
extern XNN_INTERNAL const int xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x80(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut16_p3.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut16_p3.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut16_p3.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut16_p3.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut16_p3.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut16_p3.log2e);
const __m256i vindex_mask = _mm256_load_si256((const __m256i*) params->avx2_rr1_lut16_p3.index_mask);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.c2);
for (; batch >= 80 * sizeof(float); batch -= 80 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
__m256 vx8 = _mm256_loadu_ps(input + 64);
__m256 vx9 = _mm256_loadu_ps(input + 72);
input += 80;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
const __m256 vz9 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx9, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
__m256 vn9 = _mm256_fmadd_ps(vz9, vlog2e, vmagic_bias);
const __m256i vidx0 = _mm256_and_si256(_mm256_castps_si256(vn0), vindex_mask);
const __m256i vl0 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx0, sizeof(float));
const __m256i vidx1 = _mm256_and_si256(_mm256_castps_si256(vn1), vindex_mask);
const __m256i vl1 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx1, sizeof(float));
const __m256i vidx2 = _mm256_and_si256(_mm256_castps_si256(vn2), vindex_mask);
const __m256i vl2 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx2, sizeof(float));
const __m256i vidx3 = _mm256_and_si256(_mm256_castps_si256(vn3), vindex_mask);
const __m256i vl3 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx3, sizeof(float));
const __m256i vidx4 = _mm256_and_si256(_mm256_castps_si256(vn4), vindex_mask);
const __m256i vl4 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx4, sizeof(float));
const __m256i vidx5 = _mm256_and_si256(_mm256_castps_si256(vn5), vindex_mask);
const __m256i vl5 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx5, sizeof(float));
const __m256i vidx6 = _mm256_and_si256(_mm256_castps_si256(vn6), vindex_mask);
const __m256i vl6 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx6, sizeof(float));
const __m256i vidx7 = _mm256_and_si256(_mm256_castps_si256(vn7), vindex_mask);
const __m256i vl7 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx7, sizeof(float));
const __m256i vidx8 = _mm256_and_si256(_mm256_castps_si256(vn8), vindex_mask);
const __m256i vl8 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx8, sizeof(float));
const __m256i vidx9 = _mm256_and_si256(_mm256_castps_si256(vn9), vindex_mask);
const __m256i vl9 = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx9, sizeof(float));
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 19);
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 19);
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 19);
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 19);
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 19);
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 19);
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 19);
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 19);
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 19);
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
const __m256i ven9 = _mm256_slli_epi32(_mm256_castps_si256(vn9), 19);
vn9 = _mm256_sub_ps(vn9, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
__m256 vs9 = _mm256_castsi256_ps(_mm256_add_epi32(vl9, ven9));
__m256 vt9 = _mm256_fmadd_ps(vn9, vminus_ln2, vz9);
__m256 vp0 = _mm256_fmadd_ps(vc3, vt0, vc2);
__m256 vp1 = _mm256_fmadd_ps(vc3, vt1, vc2);
__m256 vp2 = _mm256_fmadd_ps(vc3, vt2, vc2);
__m256 vp3 = _mm256_fmadd_ps(vc3, vt3, vc2);
__m256 vp4 = _mm256_fmadd_ps(vc3, vt4, vc2);
__m256 vp5 = _mm256_fmadd_ps(vc3, vt5, vc2);
__m256 vp6 = _mm256_fmadd_ps(vc3, vt6, vc2);
__m256 vp7 = _mm256_fmadd_ps(vc3, vt7, vc2);
__m256 vp8 = _mm256_fmadd_ps(vc3, vt8, vc2);
__m256 vp9 = _mm256_fmadd_ps(vc3, vt9, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vp8 = _mm256_mul_ps(vp8, vt8);
vt8 = _mm256_mul_ps(vt8, vs8);
vp9 = _mm256_mul_ps(vp9, vt9);
vt9 = _mm256_mul_ps(vt9, vs9);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
vs9 = _mm256_fmsub_ps(vs9, valpha, valpha);
vp9 = _mm256_fmadd_ps(vp9, vt9, vt9);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
vx8 = _mm256_mul_ps(vx8, vbeta);
const __m256 ve9 = _mm256_fmadd_ps(vp9, valpha, vs9);
vx9 = _mm256_mul_ps(vx9, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
const __m256 vy9 = _mm256_blendv_ps(vx9, ve9, vx9);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
_mm256_storeu_ps(output + 72, vy9);
output += 80;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut16_p3.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i vidx = _mm256_and_si256(_mm256_castps_si256(vn), vindex_mask);
const __m256i vl = _mm256_i32gather_epi32(xnn_table_exp2minus_k_over_16, vidx, sizeof(float));
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 19);
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc3, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 14,457 | 45.191693 | 126 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
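  // lut4_p4_perm variant: instead of gathering from a 16-entry table in
  // memory, a small exp2 table is held in a single __m256 register and
  // indexed per lane with _mm256_permutevar_ps; the coarser table is
  // compensated by a degree-4 polynomial (vc4..vc2).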
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
input += 16;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 6,145 | 36.024096 | 125 |
c
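The f32-velu kernels in this group evaluate an extended ELU: y = beta * x for x >= 0 and y = alpha * (exp(prescale * x) - 1) for x < 0, where the final _mm256_blendv_ps(vx, ve, vx) selects the negative-input branch from the sign bit of x. A minimal scalar reference of that contract, shown only to make the prescale/alpha/beta semantics explicit (it is not part of XNNPACK), is:

#include <assert.h>
#include <math.h>
#include <stddef.h>

// Scalar reference for the ELU variant computed by the vector kernels:
//   y = beta * x                          if x >= 0
//   y = alpha * (exp(prescale * x) - 1)   if x <  0
// expm1f keeps accuracy near zero; the vector kernels reach the same result
// through a table-based exp2 reduction and a degree-4 polynomial, and clamp
// prescale * x at a saturation cutoff so very negative inputs stay finite.
static void f32_elu_reference(
    size_t batch,        // size in bytes, a multiple of sizeof(float)
    const float* input,
    float* output,
    float prescale,
    float alpha,
    float beta)
{
  assert(batch % sizeof(float) == 0);
  for (size_t i = 0; i < batch / sizeof(float); i++) {
    const float x = input[i];
    output[i] = (x >= 0.0f) ? beta * x : alpha * expm1f(prescale * x);
  }
}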
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
input += 24;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
output += 24;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 7,116 | 37.679348 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
input += 32;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
output += 32;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 8,087 | 39.039604 | 125 |
c
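The negative branch above is an exp evaluation reduced as exp(z) = 2^n * exp(t), where n is z * log2(e) rounded to a multiple of 1/4: the two low bits of the magic-biased n select one of four 2^(i/4) table entries through _mm256_permutevar_ps, the remaining integer bits are shifted into the floating-point exponent field (the << 21), and exp(t) on the small remainder t = z - n * ln(2) is a degree-4 polynomial. A scalar sketch of that reduction, with library rounding and plain Taylor coefficients standing in for the kernel's magic-bias trick and minimax constants, is:

#include <math.h>

// Sketch of the "rr1 lut4 p4" reduction for exp(z) with z <= 0:
//   n      = z * log2(e) rounded to the nearest multiple of 1/4
//   2^n    = 2^(integer part of n) * table[fractional quarter of n]
//   exp(z) ~= 2^n * (1 + t + t^2/2 + t^3/6 + t^4/24),  t = z - n * ln(2)
// The Taylor coefficients below are stand-ins for the kernel's minimax c2..c4,
// and lrintf stands in for the magic-bias rounding trick.
static float exp_lut4_p4_sketch(float z)
{
  static const float table[4] = {
    0x1.000000p+0f,  // 2^(0/4)
    0x1.306fe0p+0f,  // 2^(1/4)
    0x1.6a09e6p+0f,  // 2^(2/4)
    0x1.ae89fap+0f,  // 2^(3/4)
  };
  const long q = lrintf(z * 0x1.715476p+0f * 4.0f);  // q = 4*n
  const int idx = (int) (q & 3);                     // low 2 bits -> table index
  const int e = (int) ((q - idx) / 4);               // integer part of n
  const float n = (float) q * 0.25f;
  const float t = z - n * 0x1.62E430p-1f;            // t = z - n*ln(2)
  const float p = 1.0f + t * (1.0f + t * (0.5f + t * (0x1.555556p-3f + t * 0x1.555556p-5f)));
  return ldexpf(table[idx], e) * p;
}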
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x40.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x40(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 40 * sizeof(float); batch -= 40 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
input += 40;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
output += 40;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 9,058 | 40.177273 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x48.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x48(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 48 * sizeof(float); batch -= 48 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
input += 48;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
output += 48;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 10,029 | 41.142857 | 125 |
c
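The remainder block in each of these kernels handles 1 to 7 leftover floats with _mm256_maskload_ps; the pointer arithmetic &mask_table[7] - batch implies a table laid out as seven all-ones 32-bit words followed by zero words, so stepping back batch bytes from element 7 leaves exactly batch / sizeof(float) active lanes at the front of the mask. A self-contained demonstration of the same construction, with a local table rather than the one in the operator params and a masked store in place of the kernels' 4/2/1 tail stores, is:

#include <assert.h>
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

// Local mask table: loading 8 words starting at index 7 - n yields a mask
// whose first n lanes are active. XNNPACK keeps an equivalent table in the
// operator parameters.
static const int32_t mask_table[14] = {
  -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0,
};

// Copies the trailing 1..7 floats of a batch with a masked load/store pair.
static void copy_remainder(const float* input, float* output, size_t batch)
{
  assert(batch >= 1 * sizeof(float));
  assert(batch <= 7 * sizeof(float));
  const __m256i vmask =
    _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - batch));
  const __m256 vx = _mm256_maskload_ps(input, vmask);
  _mm256_maskstore_ps(output, vmask, vx);
}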
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x56.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x56(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 56 * sizeof(float); batch -= 56 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
input += 56;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 21);
const __m256i vl6 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn6)));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
__m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
output += 56;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,000 | 41.972656 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x64(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 64 * sizeof(float); batch -= 64 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
input += 64;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 21);
const __m256i vl6 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn6)));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 21);
const __m256i vl7 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn7)));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
__m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
__m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
output += 64;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 11,971 | 42.693431 | 125 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-avx2-rr1-lut4-p4-perm-x72.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/avx2-rr1-lut4-p4-perm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_lut4_p4.prescale);
const __m256 valpha = _mm256_load_ps(params->avx2_rr1_lut4_p4.alpha);
const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_lut4_p4.beta);
const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_lut4_p4.sat_cutoff);
const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_lut4_p4.magic_bias);
const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_lut4_p4.log2e);
const __m256 vtable = _mm256_load_ps(params->avx2_rr1_lut4_p4.table);
const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2);
const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c4);
const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c3);
const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.c2);
for (; batch >= 72 * sizeof(float); batch -= 72 * sizeof(float)) {
__m256 vx0 = _mm256_loadu_ps(input);
__m256 vx1 = _mm256_loadu_ps(input + 8);
__m256 vx2 = _mm256_loadu_ps(input + 16);
__m256 vx3 = _mm256_loadu_ps(input + 24);
__m256 vx4 = _mm256_loadu_ps(input + 32);
__m256 vx5 = _mm256_loadu_ps(input + 40);
__m256 vx6 = _mm256_loadu_ps(input + 48);
__m256 vx7 = _mm256_loadu_ps(input + 56);
__m256 vx8 = _mm256_loadu_ps(input + 64);
input += 72;
const __m256 vz0 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx0, vprescale));
const __m256 vz1 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx1, vprescale));
const __m256 vz2 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx2, vprescale));
const __m256 vz3 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx3, vprescale));
const __m256 vz4 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx4, vprescale));
const __m256 vz5 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx5, vprescale));
const __m256 vz6 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx6, vprescale));
const __m256 vz7 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx7, vprescale));
const __m256 vz8 = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx8, vprescale));
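
    // Magic-bias trick: after this fmadd the rounded value n = round(z * log2e)
    // is encoded in the low bits of vn.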
__m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
__m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
__m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
__m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);
__m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);
__m256 vn5 = _mm256_fmadd_ps(vz5, vlog2e, vmagic_bias);
__m256 vn6 = _mm256_fmadd_ps(vz6, vlog2e, vmagic_bias);
__m256 vn7 = _mm256_fmadd_ps(vz7, vlog2e, vmagic_bias);
__m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);
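
    // Split the rounded value: its low 2 bits pick one of the 4 table entries
    // (per 128-bit lane via permutevar), the higher bits are shifted into the
    // floating-point exponent field, and subtracting the magic bias recovers n.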
const __m256i ven0 = _mm256_slli_epi32(_mm256_castps_si256(vn0), 21);
const __m256i vl0 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn0)));
vn0 = _mm256_sub_ps(vn0, vmagic_bias);
const __m256i ven1 = _mm256_slli_epi32(_mm256_castps_si256(vn1), 21);
const __m256i vl1 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn1)));
vn1 = _mm256_sub_ps(vn1, vmagic_bias);
const __m256i ven2 = _mm256_slli_epi32(_mm256_castps_si256(vn2), 21);
const __m256i vl2 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn2)));
vn2 = _mm256_sub_ps(vn2, vmagic_bias);
const __m256i ven3 = _mm256_slli_epi32(_mm256_castps_si256(vn3), 21);
const __m256i vl3 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn3)));
vn3 = _mm256_sub_ps(vn3, vmagic_bias);
const __m256i ven4 = _mm256_slli_epi32(_mm256_castps_si256(vn4), 21);
const __m256i vl4 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn4)));
vn4 = _mm256_sub_ps(vn4, vmagic_bias);
const __m256i ven5 = _mm256_slli_epi32(_mm256_castps_si256(vn5), 21);
const __m256i vl5 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn5)));
vn5 = _mm256_sub_ps(vn5, vmagic_bias);
const __m256i ven6 = _mm256_slli_epi32(_mm256_castps_si256(vn6), 21);
const __m256i vl6 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn6)));
vn6 = _mm256_sub_ps(vn6, vmagic_bias);
const __m256i ven7 = _mm256_slli_epi32(_mm256_castps_si256(vn7), 21);
const __m256i vl7 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn7)));
vn7 = _mm256_sub_ps(vn7, vmagic_bias);
const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 21);
const __m256i vl8 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn8)));
vn8 = _mm256_sub_ps(vn8, vmagic_bias);
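
    // Combine table entry and exponent bits into the scale factor s, and
    // compute the reduced argument t = z + n * minus_ln2.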
__m256 vs0 = _mm256_castsi256_ps(_mm256_add_epi32(vl0, ven0));
__m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
__m256 vs1 = _mm256_castsi256_ps(_mm256_add_epi32(vl1, ven1));
__m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
__m256 vs2 = _mm256_castsi256_ps(_mm256_add_epi32(vl2, ven2));
__m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
__m256 vs3 = _mm256_castsi256_ps(_mm256_add_epi32(vl3, ven3));
__m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);
__m256 vs4 = _mm256_castsi256_ps(_mm256_add_epi32(vl4, ven4));
__m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vz4);
__m256 vs5 = _mm256_castsi256_ps(_mm256_add_epi32(vl5, ven5));
__m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vz5);
__m256 vs6 = _mm256_castsi256_ps(_mm256_add_epi32(vl6, ven6));
__m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vz6);
__m256 vs7 = _mm256_castsi256_ps(_mm256_add_epi32(vl7, ven7));
__m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vz7);
__m256 vs8 = _mm256_castsi256_ps(_mm256_add_epi32(vl8, ven8));
__m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
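
    // Horner evaluation of the degree-4 polynomial that approximates
    // exp(t) - 1 on the reduced range; the coefficient of t is an implicit 1.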
__m256 vp0 = _mm256_fmadd_ps(vc4, vt0, vc3);
__m256 vp1 = _mm256_fmadd_ps(vc4, vt1, vc3);
__m256 vp2 = _mm256_fmadd_ps(vc4, vt2, vc3);
__m256 vp3 = _mm256_fmadd_ps(vc4, vt3, vc3);
__m256 vp4 = _mm256_fmadd_ps(vc4, vt4, vc3);
__m256 vp5 = _mm256_fmadd_ps(vc4, vt5, vc3);
__m256 vp6 = _mm256_fmadd_ps(vc4, vt6, vc3);
__m256 vp7 = _mm256_fmadd_ps(vc4, vt7, vc3);
__m256 vp8 = _mm256_fmadd_ps(vc4, vt8, vc3);
vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);
vp8 = _mm256_fmadd_ps(vp8, vt8, vc2);
vp0 = _mm256_mul_ps(vp0, vt0);
vt0 = _mm256_mul_ps(vt0, vs0);
vp1 = _mm256_mul_ps(vp1, vt1);
vt1 = _mm256_mul_ps(vt1, vs1);
vp2 = _mm256_mul_ps(vp2, vt2);
vt2 = _mm256_mul_ps(vt2, vs2);
vp3 = _mm256_mul_ps(vp3, vt3);
vt3 = _mm256_mul_ps(vt3, vs3);
vp4 = _mm256_mul_ps(vp4, vt4);
vt4 = _mm256_mul_ps(vt4, vs4);
vp5 = _mm256_mul_ps(vp5, vt5);
vt5 = _mm256_mul_ps(vt5, vs5);
vp6 = _mm256_mul_ps(vp6, vt6);
vt6 = _mm256_mul_ps(vt6, vs6);
vp7 = _mm256_mul_ps(vp7, vt7);
vt7 = _mm256_mul_ps(vt7, vs7);
vp8 = _mm256_mul_ps(vp8, vt8);
vt8 = _mm256_mul_ps(vt8, vs8);
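
    // Fold alpha into the scale (vs := alpha * (s - 1)) and finish the
    // polynomial (vp := s * (t + c2*t^2 + c3*t^3 + c4*t^4) ~ s * (exp(t) - 1)).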
vs0 = _mm256_fmsub_ps(vs0, valpha, valpha);
vp0 = _mm256_fmadd_ps(vp0, vt0, vt0);
vs1 = _mm256_fmsub_ps(vs1, valpha, valpha);
vp1 = _mm256_fmadd_ps(vp1, vt1, vt1);
vs2 = _mm256_fmsub_ps(vs2, valpha, valpha);
vp2 = _mm256_fmadd_ps(vp2, vt2, vt2);
vs3 = _mm256_fmsub_ps(vs3, valpha, valpha);
vp3 = _mm256_fmadd_ps(vp3, vt3, vt3);
vs4 = _mm256_fmsub_ps(vs4, valpha, valpha);
vp4 = _mm256_fmadd_ps(vp4, vt4, vt4);
vs5 = _mm256_fmsub_ps(vs5, valpha, valpha);
vp5 = _mm256_fmadd_ps(vp5, vt5, vt5);
vs6 = _mm256_fmsub_ps(vs6, valpha, valpha);
vp6 = _mm256_fmadd_ps(vp6, vt6, vt6);
vs7 = _mm256_fmsub_ps(vs7, valpha, valpha);
vp7 = _mm256_fmadd_ps(vp7, vt7, vt7);
vs8 = _mm256_fmsub_ps(vs8, valpha, valpha);
vp8 = _mm256_fmadd_ps(vp8, vt8, vt8);
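
    // Negative branch: e = alpha*(s - 1) + alpha*s*(exp(t) - 1) = alpha*(exp(z) - 1).
    // Positive branch: x * beta.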
const __m256 ve0 = _mm256_fmadd_ps(vp0, valpha, vs0);
vx0 = _mm256_mul_ps(vx0, vbeta);
const __m256 ve1 = _mm256_fmadd_ps(vp1, valpha, vs1);
vx1 = _mm256_mul_ps(vx1, vbeta);
const __m256 ve2 = _mm256_fmadd_ps(vp2, valpha, vs2);
vx2 = _mm256_mul_ps(vx2, vbeta);
const __m256 ve3 = _mm256_fmadd_ps(vp3, valpha, vs3);
vx3 = _mm256_mul_ps(vx3, vbeta);
const __m256 ve4 = _mm256_fmadd_ps(vp4, valpha, vs4);
vx4 = _mm256_mul_ps(vx4, vbeta);
const __m256 ve5 = _mm256_fmadd_ps(vp5, valpha, vs5);
vx5 = _mm256_mul_ps(vx5, vbeta);
const __m256 ve6 = _mm256_fmadd_ps(vp6, valpha, vs6);
vx6 = _mm256_mul_ps(vx6, vbeta);
const __m256 ve7 = _mm256_fmadd_ps(vp7, valpha, vs7);
vx7 = _mm256_mul_ps(vx7, vbeta);
const __m256 ve8 = _mm256_fmadd_ps(vp8, valpha, vs8);
vx8 = _mm256_mul_ps(vx8, vbeta);
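
    // blendv keys off the sign bit of the beta-scaled input: exponential
    // branch for negative x, linear branch otherwise.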
const __m256 vy0 = _mm256_blendv_ps(vx0, ve0, vx0);
const __m256 vy1 = _mm256_blendv_ps(vx1, ve1, vx1);
const __m256 vy2 = _mm256_blendv_ps(vx2, ve2, vx2);
const __m256 vy3 = _mm256_blendv_ps(vx3, ve3, vx3);
const __m256 vy4 = _mm256_blendv_ps(vx4, ve4, vx4);
const __m256 vy5 = _mm256_blendv_ps(vx5, ve5, vx5);
const __m256 vy6 = _mm256_blendv_ps(vx6, ve6, vx6);
const __m256 vy7 = _mm256_blendv_ps(vx7, ve7, vx7);
const __m256 vy8 = _mm256_blendv_ps(vx8, ve8, vx8);
_mm256_storeu_ps(output, vy0);
_mm256_storeu_ps(output + 8, vy1);
_mm256_storeu_ps(output + 16, vy2);
_mm256_storeu_ps(output + 24, vy3);
_mm256_storeu_ps(output + 32, vy4);
_mm256_storeu_ps(output + 40, vy5);
_mm256_storeu_ps(output + 48, vy6);
_mm256_storeu_ps(output + 56, vy7);
_mm256_storeu_ps(output + 64, vy8);
output += 72;
}
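
  // Handle remaining full vectors of 8 floats with the same computation.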
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vx = _mm256_loadu_ps(input);
input += 8;
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
_mm256_storeu_ps(output, vy);
output += 8;
}
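
  // Process the final 1-7 floats with a masked load.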
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_lut4_p4.mask_table[7] - batch));
__m256 vx = _mm256_maskload_ps(input, vmask);
const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));
__m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 21);
const __m256i vl = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn)));
__m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
vn = _mm256_sub_ps(vn, vmagic_bias);
__m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);
__m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
vp = _mm256_fmadd_ps(vp, vt, vc2);
vp = _mm256_mul_ps(vp, vt);
vt = _mm256_mul_ps(vt, vs);
vs = _mm256_fmsub_ps(vs, valpha, valpha);
vp = _mm256_fmadd_ps(vp, vt, vt);
const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);
vx = _mm256_mul_ps(vx, vbeta);
const __m256 vy = _mm256_blendv_ps(vx, ve, vx);
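
    // Store the low 4, 2, and/or 1 lanes to write out exactly the remaining elements.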
__m128 vy_lo = _mm256_castps256_ps128(vy);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vy_lo);
vy_lo = _mm256_extractf128_ps(vy, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vy_lo);
vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vy_lo);
}
}
}
| 12,942 | 43.325342 | 125 |
c
|