repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, scalar variant, 1 element per
// iteration: output[i] = min(max(input_a[i] / input_b[i], min), max) with the
// clamp bounds taken from params->scalar. 'batch' is a byte count and must be a
// non-zero multiple of sizeof(float) (asserted below).
void xnn_f32_vdiv_minmax_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
// One element per iteration; batch is decremented in bytes.
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va / vb;
// Clamp: max with the lower bound first, then min with the upper bound.
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
}
}
| 1,103 | 25.285714 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, scalar variant unrolled by 2.
// output[i] = min(max(input_a[i] / input_b[i], min), max) with bounds from
// params->scalar. 'batch' is a byte count, a non-zero multiple of sizeof(float).
void xnn_f32_vdiv_minmax_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
// Main loop: 2 elements per iteration, loads/divides/clamps grouped per stage
// (template-generated ordering — keep as-is for the generator's intended codegen).
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
input_b += 2;
float vacc0 = va0 / vb0;
float vacc1 = va1 / vb1;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
// Remainder: with an unroll of 2 there can be exactly one leftover element.
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
const float vb = *input_b;
float vacc = va / vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output = vacc;
}
}
| 1,660 | 24.166667 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, scalar variant unrolled by 4.
// output[i] = min(max(input_a[i] / input_b[i], min), max) with bounds from
// params->scalar. 'batch' is a byte count, a non-zero multiple of sizeof(float).
void xnn_f32_vdiv_minmax_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
// Main loop: 4 elements per iteration, stage-grouped (template-generated order).
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
input_b += 4;
float vacc0 = va0 / vb0;
float vacc1 = va1 / vb1;
float vacc2 = va2 / vb2;
float vacc3 = va3 / vb3;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc2 = math_max_f32(vacc2, voutput_min);
vacc3 = math_max_f32(vacc3, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
vacc2 = math_min_f32(vacc2, voutput_max);
vacc3 = math_min_f32(vacc3, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
// Remainder: 1..3 leftover elements, handled one at a time.
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va / vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,131 | 25.65 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, scalar variant unrolled by 8.
// output[i] = min(max(input_a[i] / input_b[i], min), max) with bounds from
// params->scalar. 'batch' is a byte count, a non-zero multiple of sizeof(float).
void xnn_f32_vdiv_minmax_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
// Main loop: 8 elements per iteration, stage-grouped (template-generated order).
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
const float vb4 = input_b[4];
const float vb5 = input_b[5];
const float vb6 = input_b[6];
const float vb7 = input_b[7];
input_b += 8;
float vacc0 = va0 / vb0;
float vacc1 = va1 / vb1;
float vacc2 = va2 / vb2;
float vacc3 = va3 / vb3;
float vacc4 = va4 / vb4;
float vacc5 = va5 / vb5;
float vacc6 = va6 / vb6;
float vacc7 = va7 / vb7;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc2 = math_max_f32(vacc2, voutput_min);
vacc3 = math_max_f32(vacc3, voutput_min);
vacc4 = math_max_f32(vacc4, voutput_min);
vacc5 = math_max_f32(vacc5, voutput_min);
vacc6 = math_max_f32(vacc6, voutput_min);
vacc7 = math_max_f32(vacc7, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
vacc2 = math_min_f32(vacc2, voutput_max);
vacc3 = math_min_f32(vacc3, voutput_max);
vacc4 = math_min_f32(vacc4, voutput_max);
vacc5 = math_min_f32(vacc5, voutput_max);
vacc6 = math_min_f32(vacc6, voutput_max);
vacc7 = math_min_f32(vacc7, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
// Remainder: 1..7 leftover elements, handled one at a time.
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va / vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,979 | 27.653846 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, SSE variant, 4 lanes per
// iteration. 'batch' is a byte count, a non-zero multiple of sizeof(float).
// XNN_OOB_READS: the remainder path loads a full 4-float vector, which may read
// past the end of the inputs; the annotation marks that over-read as permitted.
void xnn_f32_vdiv_minmax_ukernel__sse_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
// _mm_load_ps requires 16-byte alignment — assumes params->sse.min/max are
// 4-float aligned arrays holding the broadcast clamp bounds (TODO confirm in params decl).
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 va = _mm_loadu_ps(input_a);
input_a += 4;
const __m128 vb = _mm_loadu_ps(input_b);
input_b += 4;
__m128 vacc = _mm_div_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
// Remainder: 1..3 elements. Compute a full vector (over-read allowed, see
// XNN_OOB_READS), then store only the valid lanes.
if XNN_UNLIKELY(batch != 0) {
const __m128 va = _mm_loadu_ps(input_a);
const __m128 vb = _mm_loadu_ps(input_b);
__m128 vacc = _mm_div_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
// Store the low two lanes, then shift the high lanes down for a possible
// final single-lane store.
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,756 | 25.223881 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, SSE variant, 8 lanes per
// main-loop iteration (two 4-lane vectors). 'batch' is a byte count, a non-zero
// multiple of sizeof(float). XNN_OOB_READS: the remainder path may read a full
// 4-float vector past the end of the inputs; the annotation permits this.
void xnn_f32_vdiv_minmax_ukernel__sse_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
// Aligned loads of the broadcast clamp bounds from the params structure.
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
// Main loop: 8 elements (2 vectors) per iteration.
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(input_a);
const __m128 va1 = _mm_loadu_ps(input_a + 4);
input_a += 8;
const __m128 vb0 = _mm_loadu_ps(input_b);
const __m128 vb1 = _mm_loadu_ps(input_b + 4);
input_b += 8;
__m128 vacc0 = _mm_div_ps(va0, vb0);
__m128 vacc1 = _mm_div_ps(va1, vb1);
vacc0 = _mm_max_ps(vacc0, voutput_min);
vacc1 = _mm_max_ps(vacc1, voutput_min);
vacc0 = _mm_min_ps(vacc0, voutput_max);
vacc1 = _mm_min_ps(vacc1, voutput_max);
_mm_storeu_ps(output, vacc0);
_mm_storeu_ps(output + 4, vacc1);
output += 8;
}
// Single-vector loop for a leftover group of 4 elements.
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 va = _mm_loadu_ps(input_a);
input_a += 4;
const __m128 vb = _mm_loadu_ps(input_b);
input_b += 4;
__m128 vacc = _mm_div_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
// Remainder: 1..3 elements; over-reading load, partial store of valid lanes.
if XNN_UNLIKELY(batch != 0) {
const __m128 va = _mm_loadu_ps(input_a);
const __m128 vb = _mm_loadu_ps(input_b);
__m128 vacc = _mm_div_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 2,408 | 25.766667 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WebAssembly scalar variant,
// 1 element per iteration. Identical contract to the scalar kernels but uses
// __builtin_wasm_min/max_f32 so the clamp lowers to wasm f32.min/f32.max.
// 'batch' is a byte count, a non-zero multiple of sizeof(float).
void xnn_f32_vdiv_minmax_ukernel__wasm_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va / vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
}
}
| 1,121 | 25.714286 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WebAssembly scalar variant
// unrolled by 2 (clamping via __builtin_wasm_min/max_f32). 'batch' is a byte
// count, a non-zero multiple of sizeof(float).
void xnn_f32_vdiv_minmax_ukernel__wasm_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
// Main loop: 2 elements per iteration, stage-grouped (template-generated order).
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
input_b += 2;
float vacc0 = va0 / vb0;
float vacc1 = va1 / vb1;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
// Remainder: with an unroll of 2 there can be exactly one leftover element.
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
const float vb = *input_b;
float vacc = va / vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output = vacc;
}
}
| 1,718 | 25.045455 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WebAssembly scalar variant
// unrolled by 4 (clamping via __builtin_wasm_min/max_f32). 'batch' is a byte
// count, a non-zero multiple of sizeof(float).
void xnn_f32_vdiv_minmax_ukernel__wasm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
// Main loop: 4 elements per iteration, stage-grouped (template-generated order).
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
input_b += 4;
float vacc0 = va0 / vb0;
float vacc1 = va1 / vb1;
float vacc2 = va2 / vb2;
float vacc3 = va3 / vb3;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
// Remainder: 1..3 leftover elements, handled one at a time.
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va / vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,229 | 26.875 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WebAssembly scalar variant
// unrolled by 8 (clamping via __builtin_wasm_min/max_f32). 'batch' is a byte
// count, a non-zero multiple of sizeof(float).
void xnn_f32_vdiv_minmax_ukernel__wasm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
// Main loop: 8 elements per iteration, stage-grouped (template-generated order).
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
const float vb4 = input_b[4];
const float vb5 = input_b[5];
const float vb6 = input_b[6];
const float vb7 = input_b[7];
input_b += 8;
float vacc0 = va0 / vb0;
float vacc1 = va1 / vb1;
float vacc2 = va2 / vb2;
float vacc3 = va3 / vb3;
float vacc4 = va4 / vb4;
float vacc5 = va5 / vb5;
float vacc6 = va6 / vb6;
float vacc7 = va7 / vb7;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
vacc4 = __builtin_wasm_max_f32(vacc4, voutput_min);
vacc5 = __builtin_wasm_max_f32(vacc5, voutput_min);
vacc6 = __builtin_wasm_max_f32(vacc6, voutput_min);
vacc7 = __builtin_wasm_max_f32(vacc7, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
vacc4 = __builtin_wasm_min_f32(vacc4, voutput_max);
vacc5 = __builtin_wasm_min_f32(vacc5, voutput_max);
vacc6 = __builtin_wasm_min_f32(vacc6, voutput_max);
vacc7 = __builtin_wasm_min_f32(vacc7, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
// Remainder: 1..7 leftover elements, handled one at a time.
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va / vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 3,157 | 29.365385 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasmsimd-arm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WASM SIMD variant tuned for
// ARM (uses f32x4.min/max), 16 elements (4 vectors) per main-loop iteration.
// 'batch' is a byte count, a non-zero multiple of sizeof(float). XNN_OOB_READS:
// the remainder path loads a full vector, which may read past the inputs' end.
void xnn_f32_vdiv_minmax_ukernel__wasmsimd_arm_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
// load64_splat replicates a 64-bit pair across the vector — assumes
// params->wasmsimd.min/max each hold the bound duplicated as two floats
// (TODO confirm against the params declaration).
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
// Main loop: 16 elements (4 vectors) per iteration.
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
const v128_t vb2 = wasm_v128_load(input_b + 8);
const v128_t vb3 = wasm_v128_load(input_b + 12);
input_b += 16;
v128_t vacc0 = wasm_f32x4_div(va0, vb0);
v128_t vacc1 = wasm_f32x4_div(va1, vb1);
v128_t vacc2 = wasm_f32x4_div(va2, vb2);
v128_t vacc3 = wasm_f32x4_div(va3, vb3);
vacc0 = wasm_f32x4_max(vacc0, voutput_min);
vacc1 = wasm_f32x4_max(vacc1, voutput_min);
vacc2 = wasm_f32x4_max(vacc2, voutput_min);
vacc3 = wasm_f32x4_max(vacc3, voutput_min);
vacc0 = wasm_f32x4_min(vacc0, voutput_max);
vacc1 = wasm_f32x4_min(vacc1, voutput_max);
vacc2 = wasm_f32x4_min(vacc2, voutput_max);
vacc3 = wasm_f32x4_min(vacc3, voutput_max);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
wasm_v128_store(output + 8, vacc2);
wasm_v128_store(output + 12, vacc3);
output += 16;
}
// Single-vector loop for leftover groups of 4 elements.
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
wasm_v128_store(output, vacc);
output += 4;
}
// Remainder: 1..3 elements; full-vector compute (over-read allowed, see
// XNN_OOB_READS), then store only the valid lanes.
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
// Store lanes 0-1, then shuffle lanes 2-3 down for a possible final store.
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 3,094 | 29.048544 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WASM SIMD variant tuned for
// ARM (uses f32x4.min/max), 4 elements per iteration. 'batch' is a byte count,
// a non-zero multiple of sizeof(float). XNN_OOB_READS: the remainder path loads
// a full vector and may read past the inputs' end.
void xnn_f32_vdiv_minmax_ukernel__wasmsimd_arm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
// Splat the clamp bounds from 64-bit (two-float) pairs in the params struct.
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
wasm_v128_store(output, vacc);
output += 4;
}
// Remainder: 1..3 elements; full-vector compute, partial lane stores.
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,827 | 25.882353 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasmsimd-arm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WASM SIMD variant tuned for
// ARM (uses f32x4.min/max), 8 elements (2 vectors) per main-loop iteration.
// 'batch' is a byte count, a non-zero multiple of sizeof(float). XNN_OOB_READS:
// the remainder path loads a full vector and may read past the inputs' end.
void xnn_f32_vdiv_minmax_ukernel__wasmsimd_arm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
// Splat the clamp bounds from 64-bit (two-float) pairs in the params struct.
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
// Main loop: 8 elements (2 vectors) per iteration.
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
input_b += 8;
v128_t vacc0 = wasm_f32x4_div(va0, vb0);
v128_t vacc1 = wasm_f32x4_div(va1, vb1);
vacc0 = wasm_f32x4_max(vacc0, voutput_min);
vacc1 = wasm_f32x4_max(vacc1, voutput_min);
vacc0 = wasm_f32x4_min(vacc0, voutput_max);
vacc1 = wasm_f32x4_min(vacc1, voutput_max);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
output += 8;
}
// Single-vector loop for a leftover group of 4 elements.
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
wasm_v128_store(output, vacc);
output += 4;
}
// Remainder: 1..3 elements; full-vector compute, partial lane stores.
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,515 | 26.648352 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasmsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WASM SIMD variant tuned for
// x86, 16 elements (4 vectors) per main-loop iteration. Differs from the ARM
// variant only in using f32x4.pmin/pmax (with the limit as the FIRST operand)
// instead of f32x4.min/max — presumably because pmin/pmax lower to single x86
// minps/maxps instructions; NOTE(review): pmin/pmax handle NaN lanes differently
// from min/max. 'batch' is a byte count, a non-zero multiple of sizeof(float).
// XNN_OOB_READS: the remainder path may read a full vector past the inputs' end.
void xnn_f32_vdiv_minmax_ukernel__wasmsimd_x86_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
// Splat the clamp bounds from 64-bit (two-float) pairs in the params struct.
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
// Main loop: 16 elements (4 vectors) per iteration.
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
const v128_t vb2 = wasm_v128_load(input_b + 8);
const v128_t vb3 = wasm_v128_load(input_b + 12);
input_b += 16;
v128_t vacc0 = wasm_f32x4_div(va0, vb0);
v128_t vacc1 = wasm_f32x4_div(va1, vb1);
v128_t vacc2 = wasm_f32x4_div(va2, vb2);
v128_t vacc3 = wasm_f32x4_div(va3, vb3);
vacc0 = wasm_f32x4_pmax(voutput_min, vacc0);
vacc1 = wasm_f32x4_pmax(voutput_min, vacc1);
vacc2 = wasm_f32x4_pmax(voutput_min, vacc2);
vacc3 = wasm_f32x4_pmax(voutput_min, vacc3);
vacc0 = wasm_f32x4_pmin(voutput_max, vacc0);
vacc1 = wasm_f32x4_pmin(voutput_max, vacc1);
vacc2 = wasm_f32x4_pmin(voutput_max, vacc2);
vacc3 = wasm_f32x4_pmin(voutput_max, vacc3);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
wasm_v128_store(output + 8, vacc2);
wasm_v128_store(output + 12, vacc3);
output += 16;
}
// Single-vector loop for leftover groups of 4 elements.
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
// Remainder: 1..3 elements; full-vector compute (over-read allowed, see
// XNN_OOB_READS), then store only the valid lanes.
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
if (batch & (2 * sizeof(float))) {
// Store lanes 0-1, then shuffle lanes 2-3 down for a possible final store.
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 3,106 | 29.165049 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Element-wise f32 division with output clamping, WASM SIMD variant tuned for
// x86 (f32x4.pmin/pmax with the limit as the first operand), 4 elements per
// iteration. 'batch' is a byte count, a non-zero multiple of sizeof(float).
// XNN_OOB_READS: the remainder path may read a full vector past the inputs' end.
void xnn_f32_vdiv_minmax_ukernel__wasmsimd_x86_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
// Splat the clamp bounds from 64-bit (two-float) pairs in the params struct.
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
// Remainder: 1..3 elements; full-vector compute, partial lane stores.
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_div(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,831 | 25.941176 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise output[i] = clamp(input_a[i] / input_b[i], min, max) using
// WebAssembly SIMD, 8 floats (two vectors) per main-loop iteration.
// batch is a byte count, non-zero multiple of sizeof(float); XNN_OOB_READS
// means the tail may read past the end of the inputs (never writes).
void xnn_f32_vdiv_minmax_ukernel__wasmsimd_x86_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the clamping limits to all lanes.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  // Main loop: 2-way unrolled, 8 floats per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_div(va0, vb0);
    v128_t vacc1 = wasm_f32x4_div(va1, vb1);
    // NOTE(review): limit-first operand order of pmax/pmin is intentional
    // (NaN behavior differs if swapped); preserved as generated.
    vacc0 = wasm_f32x4_pmax(voutput_min, vacc0);
    vacc1 = wasm_f32x4_pmax(voutput_min, vacc1);
    vacc0 = wasm_f32x4_pmin(voutput_max, vacc0);
    vacc1 = wasm_f32x4_pmin(voutput_max, vacc1);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one full 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_div(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: full-vector compute, partial-lane stores.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_div(va, vb);
    vacc = wasm_f32x4_pmax(voutput_min, vacc);
    vacc = wasm_f32x4_pmin(voutput_max, vacc);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,523 | 26.736264 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Elementwise division followed by ReLU, one float at a time:
//   output[i] = max(input_a[i] / input_b[i], 0.0f)
// batch is given in bytes and must be a non-zero multiple of sizeof(float).
// params is unused by this variant.
void xnn_f32_vdiv_relu_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  size_t count = batch / sizeof(float);
  while (count != 0) {
    const float quotient = *input_a++ / *input_b++;
    *output++ = math_max_f32(quotient, 0.0f);
    count -= 1;
  }
}
| 952 | 23.435897 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Elementwise division followed by ReLU, 2-way unrolled:
//   output[i] = max(input_a[i] / input_b[i], 0.0f)
// batch is in bytes, non-zero multiple of sizeof(float); params is unused.
void xnn_f32_vdiv_relu_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: two elements per iteration.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float a0 = input_a[0];
    const float a1 = input_a[1];
    const float b0 = input_b[0];
    const float b1 = input_b[1];
    input_a += 2;
    input_b += 2;
    output[0] = math_max_f32(a0 / b0, 0.0f);
    output[1] = math_max_f32(a1 / b1, 0.0f);
    output += 2;
  }
  // At most one trailing element remains.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = math_max_f32(*input_a / *input_b, 0.0f);
  }
}
| 1,402 | 22.383333 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Elementwise output[i] = max(input_a[i] / input_b[i], 0) — division + ReLU,
// 4-way unrolled scalar kernel. batch is a byte count, non-zero multiple of
// sizeof(float). params is unused by this variant.
void xnn_f32_vdiv_relu_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: four elements per iteration (loads, divides, clamps and
  // stores are grouped to expose instruction-level parallelism).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    input_b += 4;
    float vacc0 = va0 / vb0;
    float vacc1 = va1 / vb1;
    float vacc2 = va2 / vb2;
    float vacc3 = va3 / vb3;
    vacc0 = math_max_f32(vacc0, 0.0f);
    vacc1 = math_max_f32(vacc1, 0.0f);
    vacc2 = math_max_f32(vacc2, 0.0f);
    vacc3 = math_max_f32(vacc3, 0.0f);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  // Remainder: 1-3 elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va / vb;
      vacc = math_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,765 | 23.527778 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Elementwise output[i] = max(input_a[i] / input_b[i], 0) — division + ReLU,
// 8-way unrolled scalar kernel. batch is a byte count, non-zero multiple of
// sizeof(float). params is unused by this variant.
void xnn_f32_vdiv_relu_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: eight elements per iteration, phases grouped
  // (all loads, all divides, all clamps, all stores).
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    const float vb4 = input_b[4];
    const float vb5 = input_b[5];
    const float vb6 = input_b[6];
    const float vb7 = input_b[7];
    input_b += 8;
    float vacc0 = va0 / vb0;
    float vacc1 = va1 / vb1;
    float vacc2 = va2 / vb2;
    float vacc3 = va3 / vb3;
    float vacc4 = va4 / vb4;
    float vacc5 = va5 / vb5;
    float vacc6 = va6 / vb6;
    float vacc7 = va7 / vb7;
    vacc0 = math_max_f32(vacc0, 0.0f);
    vacc1 = math_max_f32(vacc1, 0.0f);
    vacc2 = math_max_f32(vacc2, 0.0f);
    vacc3 = math_max_f32(vacc3, 0.0f);
    vacc4 = math_max_f32(vacc4, 0.0f);
    vacc5 = math_max_f32(vacc5, 0.0f);
    vacc6 = math_max_f32(vacc6, 0.0f);
    vacc7 = math_max_f32(vacc7, 0.0f);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Remainder: 1-7 elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va / vb;
      vacc = math_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,401 | 25.108696 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Elementwise division followed by ReLU, one float at a time, using the
// wasm f32.max builtin for the clamp:
//   output[i] = max(input_a[i] / input_b[i], 0.0f)
// batch is in bytes, non-zero multiple of sizeof(float); params is unused.
void xnn_f32_vdiv_relu_ukernel__wasm_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  size_t count = batch / sizeof(float);
  while (count != 0) {
    const float quotient = *input_a++ / *input_b++;
    *output++ = __builtin_wasm_max_f32(quotient, 0.0f);
    count -= 1;
  }
}
| 960 | 23.641026 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Elementwise division followed by ReLU, 2-way unrolled, using the wasm
// f32.max builtin for the clamp:
//   output[i] = max(input_a[i] / input_b[i], 0.0f)
// batch is in bytes, non-zero multiple of sizeof(float); params is unused.
void xnn_f32_vdiv_relu_ukernel__wasm_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: two elements per iteration.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float a0 = input_a[0];
    const float a1 = input_a[1];
    const float b0 = input_b[0];
    const float b1 = input_b[1];
    input_a += 2;
    input_b += 2;
    output[0] = __builtin_wasm_max_f32(a0 / b0, 0.0f);
    output[1] = __builtin_wasm_max_f32(a1 / b1, 0.0f);
    output += 2;
  }
  // At most one trailing element remains.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = __builtin_wasm_max_f32(*input_a / *input_b, 0.0f);
  }
}
| 1,430 | 22.85 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Elementwise output[i] = max(input_a[i] / input_b[i], 0) — division + ReLU,
// 4-way unrolled, using the wasm f32.max builtin. batch is a byte count,
// non-zero multiple of sizeof(float). params is unused by this variant.
void xnn_f32_vdiv_relu_ukernel__wasm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: four elements per iteration, phases grouped for ILP.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    input_b += 4;
    float vacc0 = va0 / vb0;
    float vacc1 = va1 / vb1;
    float vacc2 = va2 / vb2;
    float vacc3 = va3 / vb3;
    vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
    vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
    vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
    vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  // Remainder: 1-3 elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va / vb;
      vacc = __builtin_wasm_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,813 | 24.194444 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Elementwise output[i] = max(input_a[i] / input_b[i], 0) — division + ReLU,
// 8-way unrolled, using the wasm f32.max builtin. batch is a byte count,
// non-zero multiple of sizeof(float). params is unused by this variant.
void xnn_f32_vdiv_relu_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: eight elements per iteration, phases grouped for ILP.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    const float vb4 = input_b[4];
    const float vb5 = input_b[5];
    const float vb6 = input_b[6];
    const float vb7 = input_b[7];
    input_b += 8;
    float vacc0 = va0 / vb0;
    float vacc1 = va1 / vb1;
    float vacc2 = va2 / vb2;
    float vacc3 = va3 / vb3;
    float vacc4 = va4 / vb4;
    float vacc5 = va5 / vb5;
    float vacc6 = va6 / vb6;
    float vacc7 = va7 / vb7;
    vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
    vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
    vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
    vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
    vacc4 = __builtin_wasm_max_f32(vacc4, 0.0f);
    vacc5 = __builtin_wasm_max_f32(vacc5, 0.0f);
    vacc6 = __builtin_wasm_max_f32(vacc6, 0.0f);
    vacc7 = __builtin_wasm_max_f32(vacc7, 0.0f);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Remainder: 1-7 elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va / vb;
      vacc = __builtin_wasm_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,489 | 26.065217 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise output[i] = max(input_a[i] / input_b[i], 0) — division + ReLU
// with WebAssembly SIMD, 16 floats (four vectors) per main-loop iteration.
// batch is a byte count, non-zero multiple of sizeof(float); XNN_OOB_READS
// means the tail may read (never write) past the end of the inputs.
void xnn_f32_vdiv_relu_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const v128_t vzero = wasm_i32x4_const_splat(0);
  // Main loop: 4-way unrolled, 16 floats per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_div(va0, vb0);
    v128_t vacc1 = wasm_f32x4_div(va1, vb1);
    v128_t vacc2 = wasm_f32x4_div(va2, vb2);
    v128_t vacc3 = wasm_f32x4_div(va3, vb3);
    // NOTE(review): ReLU via INTEGER max against 0 — negative floats have the
    // sign bit set and compare below zero as int32, so they clamp to +0.
    vacc0 = wasm_i32x4_max(vacc0, vzero);
    vacc1 = wasm_i32x4_max(vacc1, vzero);
    vacc2 = wasm_i32x4_max(vacc2, vzero);
    vacc3 = wasm_i32x4_max(vacc3, vzero);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one full 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_div(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: full-vector compute, partial-lane stores.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_div(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,665 | 27.063158 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise output[i] = max(input_a[i] / input_b[i], 0) — division + ReLU
// with WebAssembly SIMD, 4 floats per iteration. batch is a byte count,
// non-zero multiple of sizeof(float); XNN_OOB_READS means the tail may read
// (never write) past the end of the inputs.
void xnn_f32_vdiv_relu_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const v128_t vzero = wasm_i32x4_const_splat(0);
  // Main loop: one full 4-float vector per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_div(va, vb);
    // NOTE(review): ReLU via integer max — negative floats compare below
    // zero as int32 (sign bit set), so they clamp to +0.
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: full-vector compute, partial-lane stores.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_div(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,615 | 23.861538 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-relu-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise output[i] = max(input_a[i] / input_b[i], 0) — division + ReLU
// with WebAssembly SIMD, 8 floats (two vectors) per main-loop iteration.
// batch is a byte count, non-zero multiple of sizeof(float); XNN_OOB_READS
// means the tail may read (never write) past the end of the inputs.
void xnn_f32_vdiv_relu_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const v128_t vzero = wasm_i32x4_const_splat(0);
  // Main loop: 2-way unrolled, 8 floats per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_div(va0, vb0);
    v128_t vacc1 = wasm_f32x4_div(va1, vb1);
    // NOTE(review): ReLU via integer max — negative floats compare below
    // zero as int32 (sign bit set), so they clamp to +0.
    vacc0 = wasm_i32x4_max(vacc0, vzero);
    vacc1 = wasm_i32x4_max(vacc1, vzero);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one full 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_div(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: full-vector compute, partial-lane stores.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_div(va, vb);
    vacc = wasm_i32x4_max(vacc, vzero);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,194 | 24.823529 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Plain elementwise float division, one element at a time:
//   output[i] = input_a[i] / input_b[i]
// batch is in bytes and must be a non-zero multiple of sizeof(float);
// params is unused by this variant.
void xnn_f32_vdiv_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  size_t count = batch / sizeof(float);
  while (count-- != 0) {
    const float quotient = *input_a++ / *input_b++;
    *output++ = quotient;
  }
}
| 913 | 23.052632 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Plain elementwise float division, 2-way unrolled:
//   output[i] = input_a[i] / input_b[i]
// batch is in bytes, non-zero multiple of sizeof(float); params is unused.
void xnn_f32_vdiv_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: two elements per iteration.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float a0 = input_a[0];
    const float a1 = input_a[1];
    const float b0 = input_b[0];
    const float b1 = input_b[1];
    input_a += 2;
    input_b += 2;
    output[0] = a0 / b0;
    output[1] = a1 / b1;
    output += 2;
  }
  // At most one trailing element remains.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    *output = *input_a / *input_b;
  }
}
| 1,285 | 21.561404 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Plain elementwise output[i] = input_a[i] / input_b[i], 4-way unrolled
// scalar kernel. batch is a byte count, non-zero multiple of sizeof(float).
// params is unused by this variant.
void xnn_f32_vdiv_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: four elements per iteration, phases grouped for ILP.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    input_b += 4;
    float vacc0 = va0 / vb0;
    float vacc1 = va1 / vb1;
    float vacc2 = va2 / vb2;
    float vacc3 = va3 / vb3;
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  // Remainder: 1-3 elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va / vb;
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,568 | 22.41791 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Plain elementwise output[i] = input_a[i] / input_b[i], 8-way unrolled
// scalar kernel. batch is a byte count, non-zero multiple of sizeof(float).
// params is unused by this variant.
void xnn_f32_vdiv_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: eight elements per iteration, phases grouped for ILP.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;
    const float vb0 = input_b[0];
    const float vb1 = input_b[1];
    const float vb2 = input_b[2];
    const float vb3 = input_b[3];
    const float vb4 = input_b[4];
    const float vb5 = input_b[5];
    const float vb6 = input_b[6];
    const float vb7 = input_b[7];
    input_b += 8;
    float vacc0 = va0 / vb0;
    float vacc1 = va1 / vb1;
    float vacc2 = va2 / vb2;
    float vacc3 = va3 / vb3;
    float vacc4 = va4 / vb4;
    float vacc5 = va5 / vb5;
    float vacc6 = va6 / vb6;
    float vacc7 = va7 / vb7;
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Remainder: 1-7 elements, processed one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      const float vb = *input_b++;
      float vacc = va / vb;
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,048 | 23.686747 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Plain elementwise output[i] = input_a[i] / input_b[i] with WebAssembly
// SIMD, 16 floats (four vectors) per main-loop iteration. batch is a byte
// count, non-zero multiple of sizeof(float); XNN_OOB_READS means the tail
// may read (never write) past the end of the inputs.
void xnn_f32_vdiv_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 4-way unrolled, 16 floats per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_div(va0, vb0);
    v128_t vacc1 = wasm_f32x4_div(va1, vb1);
    v128_t vacc2 = wasm_f32x4_div(va2, vb2);
    v128_t vacc3 = wasm_f32x4_div(va3, vb3);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one full 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_div(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: full-vector compute, partial-lane stores.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_div(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,365 | 25.886364 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Plain elementwise output[i] = input_a[i] / input_b[i] with WebAssembly
// SIMD, 4 floats per iteration. batch is a byte count, non-zero multiple of
// sizeof(float); XNN_OOB_READS means the tail may read (never write) past
// the end of the inputs.
void xnn_f32_vdiv_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one full 4-float vector per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_div(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: full-vector compute, partial-lane stores.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_div(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,483 | 22.935484 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Plain elementwise output[i] = input_a[i] / input_b[i] with WebAssembly
// SIMD, 8 floats (two vectors) per main-loop iteration. batch is a byte
// count, non-zero multiple of sizeof(float); XNN_OOB_READS means the tail
// may read (never write) past the end of the inputs.
void xnn_f32_vdiv_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 2-way unrolled, 8 floats per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_div(va0, vb0);
    v128_t vacc1 = wasm_f32x4_div(va1, vb1);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one full 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_div(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // Remainder of 1-3 floats: full-vector compute, partial-lane stores.
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_div(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,978 | 23.7375 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-aarch64-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Processes 4 floats per main-loop iteration using NEON.
void xnn_f32_vdivc_minmax_ukernel__aarch64_neon_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast the clamp bounds and the scalar divisor across all lanes.
  // (Fix: the file's "&params" token had been corrupted into the mojibake
  // "¶ms", which is not valid C.)
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vb = vld1q_dup_f32(input_b);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;

    float32x4_t vacc = vdivq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);

    vst1q_f32(output, vacc); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 leftover floats: compute a full vector (XNN_OOB_READS permits the
    // over-read) and store only the valid lanes.
    const float32x4_t va = vld1q_f32(input_a);

    float32x4_t vacc = vdivq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);

    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 1,718 | 27.180328 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-aarch64-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Processes 8 floats (two q-registers) per main-loop iteration using NEON.
void xnn_f32_vdivc_minmax_ukernel__aarch64_neon_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast the clamp bounds and the scalar divisor across all lanes.
  // (Fix: "&params" had been corrupted into the mojibake "¶ms".)
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
  const float32x4_t vb = vld1q_dup_f32(input_b);
  // Main loop: 8 floats per iteration. The corrupted locals "vacc_"/"vaccl"
  // are restored to the generator's lane-range naming vacc0123/vacc4567.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vacc0123 = vld1q_f32(input_a); input_a += 4;
    float32x4_t vacc4567 = vld1q_f32(input_a); input_a += 4;

    vacc0123 = vdivq_f32(vacc0123, vb);
    vacc4567 = vdivq_f32(vacc4567, vb);

    vacc0123 = vmaxq_f32(vacc0123, voutput_min);
    vacc4567 = vmaxq_f32(vacc4567, voutput_min);

    vacc0123 = vminq_f32(vacc0123, voutput_max);
    vacc4567 = vminq_f32(vacc4567, voutput_max);

    vst1q_f32(output, vacc0123); output += 4;
    vst1q_f32(output, vacc4567); output += 4;
  }
  // Secondary loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    float32x4_t vacc = vdivq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    vst1q_f32(output, vacc); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 leftover floats: compute a full vector (XNN_OOB_READS permits the
    // over-read) and store only the valid lanes.
    const float32x4_t va = vld1q_f32(input_a);
    float32x4_t vacc = vdivq_f32(va, vb);
    vacc = vmaxq_f32(vacc, voutput_min);
    vacc = vminq_f32(vacc, voutput_max);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 2,236 | 27.679487 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->avx.min, params->avx.max], and writes it to output.
// Processes 16 floats (two YMM vectors) per main-loop iteration using AVX.
void xnn_f32_vdivc_minmax_ukernel__avx_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const __m256 voutput_min = _mm256_load_ps(params->avx.min);
  const __m256 voutput_max = _mm256_load_ps(params->avx.max);
  const __m256 vb = _mm256_broadcast_ss(input_b);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc0 = _mm256_loadu_ps(input_a);
    __m256 vacc1 = _mm256_loadu_ps(input_a + 8);
    input_a += 16;

    vacc0 = _mm256_div_ps(vacc0, vb);
    vacc1 = _mm256_div_ps(vacc1, vb);

    vacc0 = _mm256_max_ps(voutput_min, vacc0);
    vacc1 = _mm256_max_ps(voutput_min, vacc1);

    vacc0 = _mm256_min_ps(voutput_max, vacc0);
    vacc1 = _mm256_min_ps(voutput_max, vacc1);

    _mm256_storeu_ps(output, vacc0);
    _mm256_storeu_ps(output + 8, vacc1);
    output += 16;
  }
  // Secondary loop: one 8-float vector at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_div_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Masked load of the 1-7 remaining floats; the mask table is indexed so
    // that exactly `batch / sizeof(float)` lanes are enabled.
    // (Fix: "&params" had been corrupted into the mojibake "¶ms".)
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    vacc = _mm256_div_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    // Store the valid lanes piecewise: 4, then 2, then 1.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,630 | 27.912088 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->avx.min, params->avx.max], and writes it to output.
// Processes 8 floats (one YMM vector) per main-loop iteration using AVX.
void xnn_f32_vdivc_minmax_ukernel__avx_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const __m256 voutput_min = _mm256_load_ps(params->avx.min);
  const __m256 voutput_max = _mm256_load_ps(params->avx.max);
  const __m256 vb = _mm256_broadcast_ss(input_b);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_div_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Masked load of the 1-7 remaining floats; the mask table is indexed so
    // that exactly `batch / sizeof(float)` lanes are enabled.
    // (Fix: "&params" had been corrupted into the mojibake "¶ms".)
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    vacc = _mm256_div_ps(vacc, vb);
    vacc = _mm256_max_ps(voutput_min, vacc);
    vacc = _mm256_min_ps(voutput_max, vacc);
    // Store the valid lanes piecewise: 4, then 2, then 1.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,078 | 27.875 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Processes one 16-float ZMM vector per main-loop iteration using AVX-512F.
void xnn_f32_vdivc_minmax_ukernel__avx512f_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // Broadcast the clamp bounds and the scalar divisor into all 16 lanes.
  const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
  const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
  const __m512 vb = _mm512_set1_ps(*input_b);

  // Full 16-float vectors.
  while (batch >= 16 * sizeof(float)) {
    __m512 vy = _mm512_loadu_ps(input_a);
    input_a += 16;

    vy = _mm512_div_ps(vy, vb);
    vy = _mm512_max_ps(voutput_min, vy);
    vy = _mm512_min_ps(voutput_max, vy);

    _mm512_storeu_ps(output, vy);
    output += 16;
    batch -= 16 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Predicate the 1-15 element tail with a lane mask instead of over-reading.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));

    __m512 vy = _mm512_maskz_loadu_ps(vmask, input_a);
    vy = _mm512_maskz_div_ps(vmask, vy, vb);
    vy = _mm512_maskz_max_ps(vmask, voutput_min, vy);
    vy = _mm512_maskz_min_ps(vmask, voutput_max, vy);
    _mm512_mask_storeu_ps(output, vmask, vy);
  }
}
| 1,888 | 28.515625 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Processes 32 floats (two ZMM vectors) per main-loop iteration using AVX-512F.
void xnn_f32_vdivc_minmax_ukernel__avx512f_x32(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the clamp bounds and the scalar divisor into all 16 lanes.
  const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
  const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
  const __m512 vb = _mm512_set1_ps(*input_b);
  // Main loop: 32 floats per iteration, interleaving the two vectors through
  // each stage (divide, clamp-low, clamp-high).
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    __m512 vacc1 = _mm512_loadu_ps(input_a + 16);
    input_a += 32;
    vacc0 = _mm512_div_ps(vacc0, vb);
    vacc1 = _mm512_div_ps(vacc1, vb);
    vacc0 = _mm512_max_ps(voutput_min, vacc0);
    vacc1 = _mm512_max_ps(voutput_min, vacc1);
    vacc0 = _mm512_min_ps(voutput_max, vacc0);
    vacc1 = _mm512_min_ps(voutput_max, vacc1);
    _mm512_storeu_ps(output, vacc0);
    _mm512_storeu_ps(output + 16, vacc1);
    output += 32;
  }
  // Secondary loop: one 16-float vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_div_ps(vacc, vb);
    vacc = _mm512_max_ps(voutput_min, vacc);
    vacc = _mm512_min_ps(voutput_max, vacc);
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Masked (predicated) tail: only the enabled lanes are loaded, computed,
    // and stored, so no out-of-bounds access occurs.
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_div_ps(vmask, vacc, vb);
    vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
    vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 2,430 | 29.3875 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Portable scalar variant, one float per iteration.
void xnn_f32_vdivc_minmax_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  const float vb = *input_b;  // broadcast divisor

  // batch is asserted non-zero and a multiple of sizeof(float), so a
  // do/while is equivalent to the generator's for-loop form.
  do {
    float vy = *input_a++ / vb;
    vy = math_max_f32(vy, voutput_min);
    vy = math_min_f32(vy, voutput_max);
    *output++ = vy;
    batch -= sizeof(float);
  } while (batch != 0);
}
| 1,101 | 25.238095 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Portable scalar variant, unrolled by 2.
void xnn_f32_vdivc_minmax_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  const float vb = *input_b;  // broadcast divisor

  // Main loop: two elements per iteration.
  while (batch >= 2 * sizeof(float)) {
    float vy0 = input_a[0] / vb;
    float vy1 = input_a[1] / vb;
    input_a += 2;

    vy0 = math_max_f32(vy0, voutput_min);
    vy1 = math_max_f32(vy1, voutput_min);
    vy0 = math_min_f32(vy0, voutput_max);
    vy1 = math_min_f32(vy1, voutput_max);

    output[0] = vy0;
    output[1] = vy1;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one element can remain.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    float vy = *input_a / vb;
    vy = math_max_f32(vy, voutput_min);
    vy = math_min_f32(vy, voutput_max);
    *output = vy;
  }
}
| 1,571 | 24.354839 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Portable scalar variant, unrolled by 4.
void xnn_f32_vdivc_minmax_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // The divisor is a single scalar, read once before the loop.
  const float vb = *input_b;
  // Main loop: four elements per iteration, each stage (divide, clamp-low,
  // clamp-high) applied to all four before the next stage.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;
    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;
    float vacc2 = va2 / vb;
    float vacc3 = va3 / vb;
    vacc0 = math_max_f32(vacc0, voutput_min);
    vacc1 = math_max_f32(vacc1, voutput_min);
    vacc2 = math_max_f32(vacc2, voutput_min);
    vacc3 = math_max_f32(vacc3, voutput_min);
    vacc0 = math_min_f32(vacc0, voutput_max);
    vacc1 = math_min_f32(vacc1, voutput_max);
    vacc2 = math_min_f32(vacc2, voutput_max);
    vacc3 = math_min_f32(vacc3, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  // Remainder of 1-3 elements, handled one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      float vacc = va / vb;
      vacc = math_max_f32(vacc, voutput_min);
      vacc = math_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,968 | 25.608108 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Portable scalar variant, unrolled by 8.
void xnn_f32_vdivc_minmax_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // The divisor is a single scalar, read once before the loop.
  const float vb = *input_b;
  // Main loop: eight elements per iteration, each stage (divide, clamp-low,
  // clamp-high) applied to all eight before the next stage.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;
    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;
    float vacc2 = va2 / vb;
    float vacc3 = va3 / vb;
    float vacc4 = va4 / vb;
    float vacc5 = va5 / vb;
    float vacc6 = va6 / vb;
    float vacc7 = va7 / vb;
    vacc0 = math_max_f32(vacc0, voutput_min);
    vacc1 = math_max_f32(vacc1, voutput_min);
    vacc2 = math_max_f32(vacc2, voutput_min);
    vacc3 = math_max_f32(vacc3, voutput_min);
    vacc4 = math_max_f32(vacc4, voutput_min);
    vacc5 = math_max_f32(vacc5, voutput_min);
    vacc6 = math_max_f32(vacc6, voutput_min);
    vacc7 = math_max_f32(vacc7, voutput_min);
    vacc0 = math_min_f32(vacc0, voutput_max);
    vacc1 = math_min_f32(vacc1, voutput_max);
    vacc2 = math_min_f32(vacc2, voutput_max);
    vacc3 = math_min_f32(vacc3, voutput_max);
    vacc4 = math_min_f32(vacc4, voutput_max);
    vacc5 = math_min_f32(vacc5, voutput_max);
    vacc6 = math_min_f32(vacc6, voutput_max);
    vacc7 = math_min_f32(vacc7, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Remainder of 1-7 elements, handled one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      float vacc = va / vb;
      vacc = math_max_f32(vacc, voutput_min);
      vacc = math_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,676 | 27.478723 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->sse.min, params->sse.max], and writes it to output.
// Processes one 4-float XMM vector per main-loop iteration using SSE.
void xnn_f32_vdivc_minmax_ukernel__sse_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const __m128 voutput_min = _mm_load_ps(params->sse.min);
  const __m128 voutput_max = _mm_load_ps(params->sse.max);
  const __m128 vb = _mm_load1_ps(input_b);  // divisor broadcast to all lanes

  // Full 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;

    __m128 vy = _mm_div_ps(va, vb);
    vy = _mm_max_ps(vy, voutput_min);
    vy = _mm_min_ps(vy, voutput_max);

    _mm_storeu_ps(output, vy);
    output += 4;
    batch -= 4 * sizeof(float);
  }
  // Remainder of 1-3 floats: compute a full vector (XNN_OOB_READS permits the
  // over-read) and store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const __m128 va = _mm_loadu_ps(input_a);

    __m128 vy = _mm_div_ps(va, vb);
    vy = _mm_max_ps(vy, voutput_min);
    vy = _mm_min_ps(vy, voutput_max);

    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vy);
      vy = _mm_movehl_ps(vy, vy);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vy);
    }
  }
}
| 1,691 | 25.857143 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->sse.min, params->sse.max], and writes it to output.
// Processes 8 floats (two XMM vectors) per main-loop iteration using SSE.
void xnn_f32_vdivc_minmax_ukernel__sse_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m128 voutput_min = _mm_load_ps(params->sse.min);
  const __m128 voutput_max = _mm_load_ps(params->sse.max);
  // The scalar divisor, broadcast to all four lanes.
  const __m128 vb = _mm_load1_ps(input_b);
  // Main loop: 8 floats per iteration, interleaving the two vectors through
  // each stage (divide, clamp-low, clamp-high).
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 va0 = _mm_loadu_ps(input_a);
    const __m128 va1 = _mm_loadu_ps(input_a + 4);
    input_a += 8;
    __m128 vacc0 = _mm_div_ps(va0, vb);
    __m128 vacc1 = _mm_div_ps(va1, vb);
    vacc0 = _mm_max_ps(vacc0, voutput_min);
    vacc1 = _mm_max_ps(vacc1, voutput_min);
    vacc0 = _mm_min_ps(vacc0, voutput_max);
    vacc1 = _mm_min_ps(vacc1, voutput_max);
    _mm_storeu_ps(output, vacc0);
    _mm_storeu_ps(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    __m128 vacc = _mm_div_ps(va, vb);
    vacc = _mm_max_ps(vacc, voutput_min);
    vacc = _mm_min_ps(vacc, voutput_max);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  // Remainder of 1-3 floats: compute a full vector (XNN_OOB_READS permits the
  // over-read) and store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const __m128 va = _mm_loadu_ps(input_a);
    __m128 vacc = _mm_div_ps(va, vb);
    vacc = _mm_max_ps(vacc, voutput_min);
    vacc = _mm_min_ps(vacc, voutput_max);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc);
    }
  }
}
| 2,226 | 26.158537 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Scalar WebAssembly variant (uses __builtin_wasm_min/max), one float per
// iteration.
void xnn_f32_vdivc_minmax_ukernel__wasm_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  const float vb = *input_b;  // broadcast divisor

  // batch is asserted non-zero and a multiple of sizeof(float), so a
  // do/while is equivalent to the generator's for-loop form.
  do {
    float vy = *input_a++ / vb;
    vy = __builtin_wasm_max_f32(vy, voutput_min);
    vy = __builtin_wasm_min_f32(vy, voutput_max);
    *output++ = vy;
    batch -= sizeof(float);
  } while (batch != 0);
}
| 1,119 | 25.666667 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Scalar WebAssembly variant (uses __builtin_wasm_min/max), unrolled by 2.
void xnn_f32_vdivc_minmax_ukernel__wasm_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  const float vb = *input_b;  // broadcast divisor

  // Main loop: two elements per iteration.
  while (batch >= 2 * sizeof(float)) {
    float vy0 = input_a[0] / vb;
    float vy1 = input_a[1] / vb;
    input_a += 2;

    vy0 = __builtin_wasm_max_f32(vy0, voutput_min);
    vy1 = __builtin_wasm_max_f32(vy1, voutput_min);
    vy0 = __builtin_wasm_min_f32(vy0, voutput_max);
    vy1 = __builtin_wasm_min_f32(vy1, voutput_max);

    output[0] = vy0;
    output[1] = vy1;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  // At most one element can remain.
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    float vy = *input_a / vb;
    vy = __builtin_wasm_max_f32(vy, voutput_min);
    vy = __builtin_wasm_min_f32(vy, voutput_max);
    *output = vy;
  }
}
| 1,629 | 25.290323 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Scalar WebAssembly variant (uses __builtin_wasm_min/max), unrolled by 4.
void xnn_f32_vdivc_minmax_ukernel__wasm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // The divisor is a single scalar, read once before the loop.
  const float vb = *input_b;
  // Main loop: four elements per iteration, each stage (divide, clamp-low,
  // clamp-high) applied to all four before the next stage.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;
    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;
    float vacc2 = va2 / vb;
    float vacc3 = va3 / vb;
    vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
    vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
    vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
    vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
    vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
    vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
    vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
    vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  // Remainder of 1-3 elements, handled one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      float vacc = va / vb;
      vacc = __builtin_wasm_max_f32(vacc, voutput_min);
      vacc = __builtin_wasm_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,066 | 26.932432 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->scalar.min, params->scalar.max], and writes it to output.
// Scalar WebAssembly variant (uses __builtin_wasm_min/max), unrolled by 8.
void xnn_f32_vdivc_minmax_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const float voutput_min = params->scalar.min;
  const float voutput_max = params->scalar.max;
  // The divisor is a single scalar, read once before the loop.
  const float vb = *input_b;
  // Main loop: eight elements per iteration, each stage (divide, clamp-low,
  // clamp-high) applied to all eight before the next stage.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;
    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;
    float vacc2 = va2 / vb;
    float vacc3 = va3 / vb;
    float vacc4 = va4 / vb;
    float vacc5 = va5 / vb;
    float vacc6 = va6 / vb;
    float vacc7 = va7 / vb;
    vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
    vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
    vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
    vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
    vacc4 = __builtin_wasm_max_f32(vacc4, voutput_min);
    vacc5 = __builtin_wasm_max_f32(vacc5, voutput_min);
    vacc6 = __builtin_wasm_max_f32(vacc6, voutput_min);
    vacc7 = __builtin_wasm_max_f32(vacc7, voutput_min);
    vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
    vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
    vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
    vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
    vacc4 = __builtin_wasm_min_f32(vacc4, voutput_max);
    vacc5 = __builtin_wasm_min_f32(vacc5, voutput_max);
    vacc6 = __builtin_wasm_min_f32(vacc6, voutput_max);
    vacc7 = __builtin_wasm_min_f32(vacc7, voutput_max);
    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Remainder of 1-7 elements, handled one at a time.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      float vacc = va / vb;
      vacc = __builtin_wasm_max_f32(vacc, voutput_min);
      vacc = __builtin_wasm_min_f32(vacc, voutput_max);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,854 | 29.37234 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasmsimd-arm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the single scalar *input_b, clamps the
// result to [params->wasmsimd.min, params->wasmsimd.max], and writes it to
// output. Processes 16 floats (four v128 vectors) per main-loop iteration
// using WebAssembly SIMD.
void xnn_f32_vdivc_minmax_ukernel__wasmsimd_arm_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Clamp bounds splatted from the params struct; divisor splatted from the
  // single float at *input_b.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vb = wasm_v128_load32_splat(input_b);
  // Main loop: 16 floats per iteration, interleaving the four vectors through
  // each stage (divide, clamp-low, clamp-high).
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    v128_t vy0 = wasm_f32x4_div(va0, vb);
    v128_t vy1 = wasm_f32x4_div(va1, vb);
    v128_t vy2 = wasm_f32x4_div(va2, vb);
    v128_t vy3 = wasm_f32x4_div(va3, vb);
    vy0 = wasm_f32x4_max(vy0, voutput_min);
    vy1 = wasm_f32x4_max(vy1, voutput_min);
    vy2 = wasm_f32x4_max(vy2, voutput_min);
    vy3 = wasm_f32x4_max(vy3, voutput_min);
    vy0 = wasm_f32x4_min(vy0, voutput_max);
    vy1 = wasm_f32x4_min(vy1, voutput_max);
    vy2 = wasm_f32x4_min(vy2, voutput_max);
    vy3 = wasm_f32x4_min(vy3, voutput_max);
    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    wasm_v128_store(output + 8, vy2);
    wasm_v128_store(output + 12, vy3);
    output += 16;
  }
  // Secondary loop: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_max(vy, voutput_min);
    vy = wasm_f32x4_min(vy, voutput_max);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Remainder of 1-3 floats: compute a full vector (XNN_OOB_READS permits the
  // over-read) and store only the valid lanes.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_max(vy, voutput_min);
    vy = wasm_f32x4_min(vy, voutput_max);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,727 | 28.021277 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 division by a single broadcast divisor with [min, max] output clamping:
// output[i] = clamp(input_a[i] / *input_b, min, max).  WAsm SIMD kernel,
// 4 floats per iteration; "arm" flavor clamps with wasm_f32x4_min/max.
// XNN_OOB_READS: the tail path loads a full 4-lane vector that may extend past
// the end of input_a; only the valid lanes are stored back.
void xnn_f32_vdivc_minmax_ukernel__wasmsimd_arm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // 64-bit splat fills all four lanes with the clamp bound.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vb = wasm_v128_load32_splat(input_b);  // broadcast the divisor
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_max(vy, voutput_min);
    vy = wasm_f32x4_min(vy, voutput_max);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 leftover floats, computed as a full vector (may read OOB) and
  // stored lane-by-lane.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_max(vy, voutput_min);
    vy = wasm_f32x4_min(vy, voutput_max);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);  // move lanes 2-3 down
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,737 | 25.738462 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasmsimd-arm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 division by a single broadcast divisor with [min, max] output clamping:
// output[i] = clamp(input_a[i] / *input_b, min, max).  WAsm SIMD kernel,
// 8 floats (2 vectors) per main-loop iteration; "arm" flavor clamps with
// wasm_f32x4_min/max.
// XNN_OOB_READS: the tail path loads a full 4-lane vector that may extend past
// the end of input_a; only the valid lanes are stored back.
void xnn_f32_vdivc_minmax_ukernel__wasmsimd_arm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // 64-bit splat fills all four lanes with the clamp bound.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vb = wasm_v128_load32_splat(input_b);  // broadcast the divisor
  // Main loop: 2 vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;

    v128_t vy0 = wasm_f32x4_div(va0, vb);
    v128_t vy1 = wasm_f32x4_div(va1, vb);


    vy0 = wasm_f32x4_max(vy0, voutput_min);
    vy1 = wasm_f32x4_max(vy1, voutput_min);

    vy0 = wasm_f32x4_min(vy0, voutput_max);
    vy1 = wasm_f32x4_min(vy1, voutput_max);

    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    output += 8;
  }
  // Secondary loop: one full vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_max(vy, voutput_min);
    vy = wasm_f32x4_min(vy, voutput_max);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 leftover floats, computed as a full vector (may read OOB) and
  // stored lane-by-lane.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_max(vy, voutput_min);
    vy = wasm_f32x4_min(vy, voutput_max);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);  // move lanes 2-3 down
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,280 | 26.154762 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasmsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 division by a single broadcast divisor with [min, max] output clamping:
// output[i] = clamp(input_a[i] / *input_b, min, max).  WAsm SIMD kernel,
// 16 floats per main-loop iteration.  The "x86" flavor clamps with the
// pseudo-min/max intrinsics (pmin/pmax), which lower to single SSE min/max
// instructions; pmax(a, b) yields b > a ? b : a, so the bound goes first.
// XNN_OOB_READS: the tail path loads a full 4-lane vector that may extend past
// the end of input_a; only the valid lanes are stored back.
void xnn_f32_vdivc_minmax_ukernel__wasmsimd_x86_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // 64-bit splat fills all four lanes with the clamp bound.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vb = wasm_v128_load32_splat(input_b);  // broadcast the divisor
  // Main loop: 4 vectors (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;

    v128_t vy0 = wasm_f32x4_div(va0, vb);
    v128_t vy1 = wasm_f32x4_div(va1, vb);
    v128_t vy2 = wasm_f32x4_div(va2, vb);
    v128_t vy3 = wasm_f32x4_div(va3, vb);


    vy0 = wasm_f32x4_pmax(voutput_min, vy0);
    vy1 = wasm_f32x4_pmax(voutput_min, vy1);
    vy2 = wasm_f32x4_pmax(voutput_min, vy2);
    vy3 = wasm_f32x4_pmax(voutput_min, vy3);

    vy0 = wasm_f32x4_pmin(voutput_max, vy0);
    vy1 = wasm_f32x4_pmin(voutput_max, vy1);
    vy2 = wasm_f32x4_pmin(voutput_max, vy2);
    vy3 = wasm_f32x4_pmin(voutput_max, vy3);

    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    wasm_v128_store(output + 8, vy2);
    wasm_v128_store(output + 12, vy3);
    output += 16;
  }
  // Secondary loop: one full vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_pmax(voutput_min, vy);
    vy = wasm_f32x4_pmin(voutput_max, vy);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 leftover floats, computed as a full vector (may read OOB) and
  // stored lane-by-lane.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_pmax(voutput_min, vy);
    vy = wasm_f32x4_pmin(voutput_max, vy);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);  // move lanes 2-3 down
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,739 | 28.148936 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 division by a single broadcast divisor with [min, max] output clamping:
// output[i] = clamp(input_a[i] / *input_b, min, max).  WAsm SIMD kernel,
// 4 floats per iteration.  The "x86" flavor clamps with the pseudo-min/max
// intrinsics (pmin/pmax), which lower to single SSE min/max instructions;
// pmax(a, b) yields b > a ? b : a, so the bound goes first.
// XNN_OOB_READS: the tail path loads a full 4-lane vector that may extend past
// the end of input_a; only the valid lanes are stored back.
void xnn_f32_vdivc_minmax_ukernel__wasmsimd_x86_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // 64-bit splat fills all four lanes with the clamp bound.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vb = wasm_v128_load32_splat(input_b);  // broadcast the divisor
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_pmax(voutput_min, vy);
    vy = wasm_f32x4_pmin(voutput_max, vy);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 leftover floats, computed as a full vector (may read OOB) and
  // stored lane-by-lane.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_pmax(voutput_min, vy);
    vy = wasm_f32x4_pmin(voutput_max, vy);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);  // move lanes 2-3 down
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,741 | 25.8 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-minmax-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 division by a single broadcast divisor with [min, max] output clamping:
// output[i] = clamp(input_a[i] / *input_b, min, max).  WAsm SIMD kernel,
// 8 floats (2 vectors) per main-loop iteration.  The "x86" flavor clamps with
// the pseudo-min/max intrinsics (pmin/pmax), which lower to single SSE min/max
// instructions; pmax(a, b) yields b > a ? b : a, so the bound goes first.
// XNN_OOB_READS: the tail path loads a full 4-lane vector that may extend past
// the end of input_a; only the valid lanes are stored back.
void xnn_f32_vdivc_minmax_ukernel__wasmsimd_x86_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // 64-bit splat fills all four lanes with the clamp bound.
  const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
  const v128_t vb = wasm_v128_load32_splat(input_b);  // broadcast the divisor
  // Main loop: 2 vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;

    v128_t vy0 = wasm_f32x4_div(va0, vb);
    v128_t vy1 = wasm_f32x4_div(va1, vb);


    vy0 = wasm_f32x4_pmax(voutput_min, vy0);
    vy1 = wasm_f32x4_pmax(voutput_min, vy1);

    vy0 = wasm_f32x4_pmin(voutput_max, vy0);
    vy1 = wasm_f32x4_pmin(voutput_max, vy1);

    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    output += 8;
  }
  // Secondary loop: one full vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_pmax(voutput_min, vy);
    vy = wasm_f32x4_pmin(voutput_max, vy);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 leftover floats, computed as a full vector (may read OOB) and
  // stored lane-by-lane.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_f32x4_pmax(voutput_min, vy);
    vy = wasm_f32x4_pmin(voutput_max, vy);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);  // move lanes 2-3 down
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,288 | 26.25 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar kernel: divide every element by the single broadcast divisor, then
// apply ReLU -- output[i] = max(input_a[i] / *input_b, 0) -- one float per
// iteration.  `params` is unused in this variant.
void xnn_f32_vdivc_relu_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // The divisor is a single scalar shared by every element.
  const float vdivisor = *input_b;

  // batch is asserted non-zero and a multiple of sizeof(float), so a do/while
  // that strips one element per pass is safe.
  do {
    const float vquotient = *input_a++ / vdivisor;
    *output++ = math_max_f32(vquotient, 0.0f);
    batch -= sizeof(float);
  } while (batch != 0);
}
| 950 | 23.384615 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar kernel: divide every element by the single broadcast divisor, then
// apply ReLU -- output[i] = max(input_a[i] / *input_b, 0).  Unrolled 2x, with
// a single-element remainder path.  `params` is unused in this variant.
void xnn_f32_vdivc_relu_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float vb = *input_b;  // single divisor shared by all elements

  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    input_a += 2;

    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;

    vacc0 = math_max_f32(vacc0, 0.0f);  // ReLU
    vacc1 = math_max_f32(vacc1, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output += 2;
  }
  // At most one leftover element (batch is a multiple of sizeof(float)).
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    const float va = *input_a;
    float vacc = va / vb;
    vacc = math_max_f32(vacc, 0.0f);
    *output = vacc;
  }
}
| 1,313 | 22.464286 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar kernel: divide every element by the single broadcast divisor, then
// apply ReLU -- output[i] = max(input_a[i] / *input_b, 0).  Unrolled 4x, with
// a one-element-at-a-time remainder loop.  `params` is unused in this variant.
void xnn_f32_vdivc_relu_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float vb = *input_b;  // single divisor shared by all elements

  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;

    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;
    float vacc2 = va2 / vb;
    float vacc3 = va3 / vb;

    vacc0 = math_max_f32(vacc0, 0.0f);  // ReLU
    vacc1 = math_max_f32(vacc1, 0.0f);
    vacc2 = math_max_f32(vacc2, 0.0f);
    vacc3 = math_max_f32(vacc3, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  // Remainder: 1-3 leftover elements, one per pass.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      float vacc = va / vb;
      vacc = math_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,602 | 23.287879 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar kernel: divide every element by the single broadcast divisor, then
// apply ReLU -- output[i] = max(input_a[i] / *input_b, 0).  Unrolled 8x, with
// a one-element-at-a-time remainder loop.  `params` is unused in this variant.
void xnn_f32_vdivc_relu_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float vb = *input_b;  // single divisor shared by all elements

  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;

    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;
    float vacc2 = va2 / vb;
    float vacc3 = va3 / vb;
    float vacc4 = va4 / vb;
    float vacc5 = va5 / vb;
    float vacc6 = va6 / vb;
    float vacc7 = va7 / vb;

    vacc0 = math_max_f32(vacc0, 0.0f);  // ReLU
    vacc1 = math_max_f32(vacc1, 0.0f);
    vacc2 = math_max_f32(vacc2, 0.0f);
    vacc3 = math_max_f32(vacc3, 0.0f);
    vacc4 = math_max_f32(vacc4, 0.0f);
    vacc5 = math_max_f32(vacc5, 0.0f);
    vacc6 = math_max_f32(vacc6, 0.0f);
    vacc7 = math_max_f32(vacc7, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Remainder: 1-7 leftover elements, one per pass.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      float vacc = va / vb;
      vacc = math_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,098 | 24.597561 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// WAsm scalar kernel: divide every element by the single broadcast divisor,
// then apply ReLU via the wasm f32-max builtin --
// output[i] = max(input_a[i] / *input_b, 0) -- one float per iteration.
// `params` is unused in this variant.
void xnn_f32_vdivc_relu_ukernel__wasm_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // The divisor is a single scalar shared by every element.
  const float vdivisor = *input_b;

  // batch is asserted non-zero and a multiple of sizeof(float), so a do/while
  // that strips one element per pass is safe.
  do {
    const float vquotient = *input_a++ / vdivisor;
    *output++ = __builtin_wasm_max_f32(vquotient, 0.0f);
    batch -= sizeof(float);
  } while (batch != 0);
}
| 958 | 23.589744 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// WAsm scalar kernel: divide every element by the single broadcast divisor,
// then apply ReLU via the wasm f32-max builtin --
// output[i] = max(input_a[i] / *input_b, 0).  Unrolled 2x, with a
// single-element remainder path.  `params` is unused in this variant.
void xnn_f32_vdivc_relu_ukernel__wasm_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float vb = *input_b;  // single divisor shared by all elements

  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    input_a += 2;

    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;

    vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);  // ReLU
    vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output += 2;
  }
  // At most one leftover element (batch is a multiple of sizeof(float)).
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    const float va = *input_a;
    float vacc = va / vb;
    vacc = __builtin_wasm_max_f32(vacc, 0.0f);
    *output = vacc;
  }
}
| 1,341 | 22.964286 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// WAsm scalar kernel: divide every element by the single broadcast divisor,
// then apply ReLU via the wasm f32-max builtin --
// output[i] = max(input_a[i] / *input_b, 0).  Unrolled 4x, with a
// one-element-at-a-time remainder loop.  `params` is unused in this variant.
void xnn_f32_vdivc_relu_ukernel__wasm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float vb = *input_b;  // single divisor shared by all elements

  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    input_a += 4;

    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;
    float vacc2 = va2 / vb;
    float vacc3 = va3 / vb;

    vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);  // ReLU
    vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
    vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
    vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output += 4;
  }
  // Remainder: 1-3 leftover elements, one per pass.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      float vacc = va / vb;
      vacc = __builtin_wasm_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 1,650 | 24.015152 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// WAsm scalar kernel: divide every element by the single broadcast divisor,
// then apply ReLU via the wasm f32-max builtin --
// output[i] = max(input_a[i] / *input_b, 0).  Unrolled 8x, with a
// one-element-at-a-time remainder loop.  `params` is unused in this variant.
void xnn_f32_vdivc_relu_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float vb = *input_b;  // single divisor shared by all elements

  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    const float va2 = input_a[2];
    const float va3 = input_a[3];
    const float va4 = input_a[4];
    const float va5 = input_a[5];
    const float va6 = input_a[6];
    const float va7 = input_a[7];
    input_a += 8;

    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;
    float vacc2 = va2 / vb;
    float vacc3 = va3 / vb;
    float vacc4 = va4 / vb;
    float vacc5 = va5 / vb;
    float vacc6 = va6 / vb;
    float vacc7 = va7 / vb;

    vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);  // ReLU
    vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
    vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
    vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
    vacc4 = __builtin_wasm_max_f32(vacc4, 0.0f);
    vacc5 = __builtin_wasm_max_f32(vacc5, 0.0f);
    vacc6 = __builtin_wasm_max_f32(vacc6, 0.0f);
    vacc7 = __builtin_wasm_max_f32(vacc7, 0.0f);

    output[0] = vacc0;
    output[1] = vacc1;
    output[2] = vacc2;
    output[3] = vacc3;
    output[4] = vacc4;
    output[5] = vacc5;
    output[6] = vacc6;
    output[7] = vacc7;
    output += 8;
  }
  // Remainder: 1-7 leftover elements, one per pass.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float va = *input_a++;
      float vacc = va / vb;
      vacc = __builtin_wasm_max_f32(vacc, 0.0f);
      *output++ = vacc;
      batch -= sizeof(float);
    } while (batch != 0);
  }
}
| 2,186 | 25.670732 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// WAsm SIMD kernel: divide every element by the single broadcast divisor,
// then apply ReLU -- output[i] = max(input_a[i] / *input_b, 0).  Processes
// 16 floats per main-loop iteration.
// ReLU is implemented with an *integer* i32x4 max against zero: for IEEE f32,
// a positive float reinterpreted as i32 is positive (kept), while any float
// with the sign bit set is negative as i32 and is replaced by integer 0,
// which is +0.0f.
// XNN_OOB_READS: the tail path loads a full 4-lane vector that may extend past
// the end of input_a; only the valid lanes are stored back.
void xnn_f32_vdivc_relu_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);
  const v128_t vb = wasm_v128_load32_splat(input_b);  // broadcast the divisor
  // Main loop: 4 vectors (16 floats) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;

    v128_t vy0 = wasm_f32x4_div(va0, vb);
    v128_t vy1 = wasm_f32x4_div(va1, vb);
    v128_t vy2 = wasm_f32x4_div(va2, vb);
    v128_t vy3 = wasm_f32x4_div(va3, vb);


    vy0 = wasm_i32x4_max(vy0, vzero);
    vy1 = wasm_i32x4_max(vy1, vzero);
    vy2 = wasm_i32x4_max(vy2, vzero);
    vy3 = wasm_i32x4_max(vy3, vzero);

    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    wasm_v128_store(output + 8, vy2);
    wasm_v128_store(output + 12, vy3);
    output += 16;
  }
  // Secondary loop: one full vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_i32x4_max(vy, vzero);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 leftover floats, computed as a full vector (may read OOB) and
  // stored lane-by-lane.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_i32x4_max(vy, vzero);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);  // move lanes 2-3 down
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,322 | 26.011628 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// WAsm SIMD kernel: divide every element by the single broadcast divisor,
// then apply ReLU -- output[i] = max(input_a[i] / *input_b, 0) -- 4 floats per
// iteration.
// ReLU is implemented with an *integer* i32x4 max against zero: a positive
// IEEE f32 reinterpreted as i32 is positive (kept), while any float with the
// sign bit set compares negative and is replaced by integer 0 (+0.0f).
// XNN_OOB_READS: the tail path loads a full 4-lane vector that may extend past
// the end of input_a; only the valid lanes are stored back.
void xnn_f32_vdivc_relu_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);
  const v128_t vb = wasm_v128_load32_splat(input_b);  // broadcast the divisor
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_i32x4_max(vy, vzero);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 leftover floats, computed as a full vector (may read OOB) and
  // stored lane-by-lane.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_i32x4_max(vy, vzero);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);  // move lanes 2-3 down
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,533 | 23.741935 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-relu-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// WAsm SIMD kernel: divide every element by the single broadcast divisor,
// then apply ReLU -- output[i] = max(input_a[i] / *input_b, 0).  Processes
// 8 floats (2 vectors) per main-loop iteration.
// ReLU is implemented with an *integer* i32x4 max against zero: a positive
// IEEE f32 reinterpreted as i32 is positive (kept), while any float with the
// sign bit set compares negative and is replaced by integer 0 (+0.0f).
// XNN_OOB_READS: the tail path loads a full 4-lane vector that may extend past
// the end of input_a; only the valid lanes are stored back.
void xnn_f32_vdivc_relu_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const v128_t vzero = wasm_i32x4_const_splat(0);
  const v128_t vb = wasm_v128_load32_splat(input_b);  // broadcast the divisor
  // Main loop: 2 vectors (8 floats) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;

    v128_t vy0 = wasm_f32x4_div(va0, vb);
    v128_t vy1 = wasm_f32x4_div(va1, vb);


    vy0 = wasm_i32x4_max(vy0, vzero);
    vy1 = wasm_i32x4_max(vy1, vzero);

    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    output += 8;
  }
  // Secondary loop: one full vector (4 floats) at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_i32x4_max(vy, vzero);
    wasm_v128_store(output, vy);
    output += 4;
  }
  // Tail: 1-3 leftover floats, computed as a full vector (may read OOB) and
  // stored lane-by-lane.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);

    v128_t vy = wasm_f32x4_div(va, vb);
    vy = wasm_i32x4_max(vy, vzero);

    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);  // move lanes 2-3 down
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,975 | 24.333333 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar kernel: output[i] = input_a[i] / *input_b (no activation), one float
// per iteration.  `params` is unused in this variant.
void xnn_f32_vdivc_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  // The divisor is a single scalar shared by every element.
  const float vdivisor = *input_b;

  // batch is asserted non-zero and a multiple of sizeof(float), so a do/while
  // that strips one element per pass is safe.
  do {
    *output++ = *input_a++ / vdivisor;
    batch -= sizeof(float);
  } while (batch != 0);
}
| 911 | 23 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Scalar kernel: output[i] = input_a[i] / *input_b (no activation).
// Unrolled 2x, with a single-element remainder path.  `params` is unused in
// this variant.
void xnn_f32_vdivc_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);

  const float vb = *input_b;  // single divisor shared by all elements

  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float va0 = input_a[0];
    const float va1 = input_a[1];
    input_a += 2;

    float vacc0 = va0 / vb;
    float vacc1 = va1 / vb;


    output[0] = vacc0;
    output[1] = vacc1;
    output += 2;
  }
  // At most one leftover element (batch is a multiple of sizeof(float)).
  if XNN_UNLIKELY(batch != 0) {
    assert(batch == sizeof(float));
    const float va = *input_a;
    float vacc = va / vb;
    *output = vacc;
  }
}
| 1,196 | 21.584906 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the broadcast scalar *input_b.
// Scalar variant unrolled by 4, with an element-at-a-time remainder loop.
void xnn_f32_vdivc_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Single divisor shared by all elements.
  const float vdivisor = *input_b;
  while (batch >= 4 * sizeof(float)) {
    // Fixed-trip inner loop; compilers fully unroll this.
    for (size_t i = 0; i < 4; i++) {
      output[i] = input_a[i] / vdivisor;
    }
    input_a += 4;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  while (batch != 0) {
    *output++ = *input_a++ / vdivisor;
    batch -= sizeof(float);
  }
}
| 1,405 | 22.04918 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the broadcast scalar *input_b.
// Scalar variant unrolled by 8, with an element-at-a-time remainder loop.
void xnn_f32_vdivc_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Single divisor shared by all elements.
  const float vdivisor = *input_b;
  while (batch >= 8 * sizeof(float)) {
    // Fixed-trip inner loop; compilers fully unroll this.
    for (size_t i = 0; i < 8; i++) {
      output[i] = input_a[i] / vdivisor;
    }
    input_a += 8;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  while (batch != 0) {
    *output++ = *input_a++ / vdivisor;
    batch -= sizeof(float);
  }
}
| 1,745 | 22.917808 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the broadcast scalar *input_b.
// WAsm SIMD variant: 16 elements (4 vectors) per main-loop iteration,
// a 4-element vector loop, then a masked tail. XNN_OOB_READS marks that
// the tail deliberately loads a full vector past `batch` elements; only
// in-range lanes are stored.
void xnn_f32_vdivc_ukernel__wasmsimd_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the single divisor into all four lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    v128_t vy0 = wasm_f32x4_div(va0, vb);
    v128_t vy1 = wasm_f32x4_div(va1, vb);
    v128_t vy2 = wasm_f32x4_div(va2, vb);
    v128_t vy3 = wasm_f32x4_div(va3, vb);
    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    wasm_v128_store(output + 8, vy2);
    wasm_v128_store(output + 12, vy3);
    output += 16;
  }
  // One vector (4 elements) at a time for what the 16-wide loop left.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_div(va, vb);
    wasm_v128_store(output, vy);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 elements left: full-vector load (OOB read allowed), partial store.
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_div(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      // Shift the upper two lanes down for the possible final element.
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 2,046 | 24.911392 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the broadcast scalar *input_b.
// WAsm SIMD variant: 4 elements per iteration plus a masked tail.
// XNN_OOB_READS marks the deliberate over-read in the tail path.
void xnn_f32_vdivc_ukernel__wasmsimd_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the single divisor into all four lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_div(va, vb);
    wasm_v128_store(output, vy);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 elements left: full-vector load (OOB read allowed), partial store.
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_div(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      // Shift the upper two lanes down for the possible final element.
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,409 | 22.898305 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdivc-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Divides every element of input_a by the broadcast scalar *input_b.
// WAsm SIMD variant: 8 elements (2 vectors) per main-loop iteration,
// a 4-element vector loop, then a masked tail with a deliberate
// over-read (XNN_OOB_READS).
void xnn_f32_vdivc_ukernel__wasmsimd_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Broadcast the single divisor into all four lanes.
  const v128_t vb = wasm_v128_load32_splat(input_b);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    v128_t vy0 = wasm_f32x4_div(va0, vb);
    v128_t vy1 = wasm_f32x4_div(va1, vb);
    wasm_v128_store(output, vy0);
    wasm_v128_store(output + 4, vy1);
    output += 8;
  }
  // One vector (4 elements) at a time for what the 8-wide loop left.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    v128_t vy = wasm_f32x4_div(va, vb);
    wasm_v128_store(output, vy);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 elements left: full-vector load (OOB read allowed), partial store.
    const v128_t va = wasm_v128_load(input_a);
    v128_t vy = wasm_f32x4_div(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vy, 0);
      // Shift the upper two lanes down for the possible final element.
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vy, 0);
    }
  }
}
| 1,775 | 23.328767 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise maximum of two f32 arrays; AVX variant processing 16
// elements (2 YMM vectors) per main-loop iteration, then 8, then a
// masked tail of 1-7 elements.
// Fix: the mask-table address expression was mojibake-corrupted
// ("&params" had been garbled into a pilcrow sequence); restored to
// `&params->avx.mask_table[7]`.
void xnn_f32_vmax_ukernel__avx_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc0 = _mm256_loadu_ps(input_a);
    __m256 vacc1 = _mm256_loadu_ps(input_a + 8);
    input_a += 16;
    vacc0 = _mm256_max_ps(vacc0, _mm256_loadu_ps(input_b));
    vacc1 = _mm256_max_ps(vacc1, _mm256_loadu_ps(input_b + 8));
    input_b += 16;
    _mm256_storeu_ps(output, vacc0);
    _mm256_storeu_ps(output + 8, vacc1);
    output += 16;
  }
  // One YMM vector (8 elements) at a time for what the 16-wide loop left.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_max_ps(vacc, _mm256_loadu_ps(input_b));
    input_b += 8;
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Select a per-lane mask so only the first batch/4 lanes are loaded.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    const __m256 vb = _mm256_maskload_ps(input_b, vmask);
    vacc = _mm256_max_ps(vacc, vb);
    // Store 4 / 2 / 1 valid elements from the low lanes outward.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,244 | 26.378049 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise maximum of two f32 arrays; AVX variant processing 8
// elements (1 YMM vector) per iteration, then a masked tail of 1-7.
// Fix: the mask-table address expression was mojibake-corrupted
// ("&params" had been garbled into a pilcrow sequence); restored to
// `&params->avx.mask_table[7]`.
void xnn_f32_vmax_ukernel__avx_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_max_ps(vacc, _mm256_loadu_ps(input_b));
    input_b += 8;
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Select a per-lane mask so only the first batch/4 lanes are loaded.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    const __m256 vb = _mm256_maskload_ps(input_b, vmask);
    vacc = _mm256_max_ps(vacc, vb);
    // Store 4 / 2 / 1 valid elements from the low lanes outward.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 1,814 | 26.089552 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Elementwise maximum of two f32 arrays; AVX-512F variant processing 16
// elements (1 ZMM vector) per iteration, with a predicated tail.
void xnn_f32_vmax_ukernel__avx512f_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_max_ps(vacc, _mm512_loadu_ps(input_b));
    input_b += 16;
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Masked load / max / store touch only the valid lanes; the rest stay zero.
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_max_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 1,564 | 26.946429 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Elementwise maximum of two f32 arrays; AVX-512F variant processing 32
// elements (2 ZMM vectors) per main-loop iteration, then 16, then a
// predicated tail.
void xnn_f32_vmax_ukernel__avx512f_x32(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    __m512 vacc1 = _mm512_loadu_ps(input_a + 16);
    input_a += 32;
    vacc0 = _mm512_max_ps(vacc0, _mm512_loadu_ps(input_b));
    vacc1 = _mm512_max_ps(vacc1, _mm512_loadu_ps(input_b + 16));
    input_b += 32;
    _mm512_storeu_ps(output, vacc0);
    _mm512_storeu_ps(output + 16, vacc1);
    output += 32;
  }
  // One ZMM vector (16 elements) at a time for what the 32-wide loop left.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_max_ps(vacc, _mm512_loadu_ps(input_b));
    input_b += 16;
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    // Masked load / max / store touch only the valid lanes; the rest stay zero.
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_max_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 1,996 | 27.126761 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise maximum of two f32 arrays; NEON variant processing 4
// elements per iteration. The tail performs full-vector loads past the
// end of the inputs (XNN_OOB_READS) and stores only the valid lanes.
void xnn_f32_vmax_ukernel__neon_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc = vmaxq_f32(va, vb);
    vst1q_f32(output, vacc); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 elements left: full-vector loads (OOB reads allowed), partial store.
    const float32x4_t va = vld1q_f32(input_a);
    const float32x4_t vb = vld1q_f32(input_b);
    float32x4_t vacc = vmaxq_f32(va, vb);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 1,456 | 25.017857 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// Elementwise maximum of two f32 arrays; NEON variant processing 8
// elements (2 vectors) per main-loop iteration, then 4, then a tail
// with full-vector over-reads (XNN_OOB_READS) and lane-wise stores.
void xnn_f32_vmax_ukernel__neon_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t va0 = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb0 = vld1q_f32(input_b); input_b += 4;
    const float32x4_t va1 = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb1 = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc0 = vmaxq_f32(va0, vb0);
    float32x4_t vacc1 = vmaxq_f32(va1, vb1);
    vst1q_f32(output, vacc0); output += 4;
    vst1q_f32(output, vacc1); output += 4;
  }
  // One vector (4 elements) at a time for what the 8-wide loop left.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t va = vld1q_f32(input_a); input_a += 4;
    const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
    float32x4_t vacc = vmaxq_f32(va, vb);
    vst1q_f32(output, vacc); output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 elements left: full-vector loads (OOB reads allowed), partial store.
    const float32x4_t va = vld1q_f32(input_a);
    const float32x4_t vb = vld1q_f32(input_b);
    float32x4_t vacc = vmaxq_f32(va, vb);
    float32x2_t vacc_lo = vget_low_f32(vacc);
    if (batch & (2 * sizeof(float))) {
      vst1_f32(output, vacc_lo); output += 2;
      vacc_lo = vget_high_f32(vacc);
    }
    if (batch & (1 * sizeof(float))) {
      vst1_lane_f32(output, vacc_lo, 0);
    }
  }
}
| 1,955 | 26.942857 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes the elementwise maximum of input_a and input_b.
// Scalar variant, one element per loop iteration.
void xnn_f32_vmax_ukernel__scalar_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  do {
    const float vx = *input_a++;
    const float vy = *input_b++;
    *output++ = math_max_f32(vx, vy);
    batch -= sizeof(float);
  } while (batch != 0);
}
| 926 | 23.394737 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes the elementwise maximum of input_a and input_b.
// Scalar variant unrolled by 2, with a single-element remainder path.
void xnn_f32_vmax_ukernel__scalar_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  while (batch >= 2 * sizeof(float)) {
    output[0] = math_max_f32(input_a[0], input_b[0]);
    output[1] = math_max_f32(input_a[1], input_b[1]);
    input_a += 2;
    input_b += 2;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // At most one trailing element can remain with an unroll factor of 2.
    assert(batch == sizeof(float));
    *output = math_max_f32(*input_a, *input_b);
  }
}
| 1,324 | 22.245614 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes the elementwise maximum of input_a and input_b.
// Scalar variant unrolled by 4, with an element-at-a-time remainder loop.
void xnn_f32_vmax_ukernel__scalar_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  while (batch >= 4 * sizeof(float)) {
    // Fixed-trip inner loop; compilers fully unroll this.
    for (size_t i = 0; i < 4; i++) {
      output[i] = math_max_f32(input_a[i], input_b[i]);
    }
    input_a += 4;
    input_b += 4;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  while (batch != 0) {
    const float vx = *input_a++;
    const float vy = *input_b++;
    *output++ = math_max_f32(vx, vy);
    batch -= sizeof(float);
  }
}
| 1,633 | 23.38806 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes the elementwise maximum of input_a and input_b.
// Scalar variant unrolled by 8, with an element-at-a-time remainder loop.
void xnn_f32_vmax_ukernel__scalar_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  while (batch >= 8 * sizeof(float)) {
    // Fixed-trip inner loop; compilers fully unroll this.
    for (size_t i = 0; i < 8; i++) {
      output[i] = math_max_f32(input_a[i], input_b[i]);
    }
    input_a += 8;
    input_b += 8;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  while (batch != 0) {
    const float vx = *input_a++;
    const float vy = *input_b++;
    *output++ = math_max_f32(vx, vy);
    batch -= sizeof(float);
  }
}
| 2,165 | 25.096386 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Elementwise maximum of two f32 arrays; SSE variant processing 4
// elements per iteration. The tail loads full XMM vectors past the end
// of the inputs (XNN_OOB_READS) and stores only the valid elements.
void xnn_f32_vmax_ukernel__sse_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    const __m128 vb = _mm_loadu_ps(input_b);
    input_b += 4;
    __m128 vacc = _mm_max_ps(va, vb);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 elements left: full-vector loads (OOB reads allowed), partial store.
    const __m128 va = _mm_loadu_ps(input_a);
    const __m128 vb = _mm_loadu_ps(input_b);
    __m128 vacc = _mm_max_ps(va, vb);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc);
      // Shift the upper two elements down for the possible final element.
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc);
    }
  }
}
| 1,464 | 23.016393 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// Elementwise maximum of two f32 arrays; SSE variant processing 8
// elements (2 XMM vectors) per main-loop iteration, then 4, then a
// tail with full-vector over-reads (XNN_OOB_READS) and partial stores.
void xnn_f32_vmax_ukernel__sse_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 va0 = _mm_loadu_ps(input_a);
    const __m128 va1 = _mm_loadu_ps(input_a + 4);
    input_a += 8;
    const __m128 vb0 = _mm_loadu_ps(input_b);
    const __m128 vb1 = _mm_loadu_ps(input_b + 4);
    input_b += 8;
    __m128 vacc0 = _mm_max_ps(va0, vb0);
    __m128 vacc1 = _mm_max_ps(va1, vb1);
    _mm_storeu_ps(output, vacc0);
    _mm_storeu_ps(output + 4, vacc1);
    output += 8;
  }
  // One XMM vector (4 elements) at a time for what the 8-wide loop left.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 va = _mm_loadu_ps(input_a);
    input_a += 4;
    const __m128 vb = _mm_loadu_ps(input_b);
    input_b += 4;
    __m128 vacc = _mm_max_ps(va, vb);
    _mm_storeu_ps(output, vacc);
    output += 4;
  }
  if XNN_UNLIKELY(batch != 0) {
    // 1-3 elements left: full-vector loads (OOB reads allowed), partial store.
    const __m128 va = _mm_loadu_ps(input_a);
    const __m128 vb = _mm_loadu_ps(input_b);
    __m128 vacc = _mm_max_ps(va, vb);
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc);
      // Shift the upper two elements down for the possible final element.
      vacc = _mm_movehl_ps(vacc, vacc);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc);
    }
  }
}
| 1,939 | 23.556962 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes the elementwise maximum of input_a and input_b using the
// WebAssembly f32.max builtin. One element per loop iteration.
void xnn_f32_vmax_ukernel__wasm_x1(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  do {
    const float vx = *input_a++;
    const float vy = *input_b++;
    *output++ = __builtin_wasm_max_f32(vx, vy);
    batch -= sizeof(float);
  } while (batch != 0);
}
| 934 | 23.605263 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes the elementwise maximum of input_a and input_b using the
// WebAssembly f32.max builtin. Unrolled by 2, single-element remainder.
void xnn_f32_vmax_ukernel__wasm_x2(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  while (batch >= 2 * sizeof(float)) {
    output[0] = __builtin_wasm_max_f32(input_a[0], input_b[0]);
    output[1] = __builtin_wasm_max_f32(input_a[1], input_b[1]);
    input_a += 2;
    input_b += 2;
    output += 2;
    batch -= 2 * sizeof(float);
  }
  if XNN_UNLIKELY(batch != 0) {
    // At most one trailing element can remain with an unroll factor of 2.
    assert(batch == sizeof(float));
    *output = __builtin_wasm_max_f32(*input_a, *input_b);
  }
}
| 1,352 | 22.736842 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes the elementwise maximum of input_a and input_b using the
// WebAssembly f32.max builtin. Unrolled by 4, element-wise remainder.
void xnn_f32_vmax_ukernel__wasm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  while (batch >= 4 * sizeof(float)) {
    // Fixed-trip inner loop; compilers fully unroll this.
    for (size_t i = 0; i < 4; i++) {
      output[i] = __builtin_wasm_max_f32(input_a[i], input_b[i]);
    }
    input_a += 4;
    input_b += 4;
    output += 4;
    batch -= 4 * sizeof(float);
  }
  while (batch != 0) {
    const float vx = *input_a++;
    const float vy = *input_b++;
    *output++ = __builtin_wasm_max_f32(vx, vy);
    batch -= sizeof(float);
  }
}
| 1,681 | 24.104478 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
// Computes the elementwise maximum of input_a and input_b using the
// WebAssembly f32.max builtin. Unrolled by 8, element-wise remainder.
void xnn_f32_vmax_ukernel__wasm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  while (batch >= 8 * sizeof(float)) {
    // Fixed-trip inner loop; compilers fully unroll this.
    for (size_t i = 0; i < 8; i++) {
      output[i] = __builtin_wasm_max_f32(input_a[i], input_b[i]);
    }
    input_a += 8;
    input_b += 8;
    output += 8;
    batch -= 8 * sizeof(float);
  }
  while (batch != 0) {
    const float vx = *input_a++;
    const float vy = *input_b++;
    *output++ = __builtin_wasm_max_f32(vx, vy);
    batch -= sizeof(float);
  }
}
| 2,253 | 26.156627 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasmsimd-arm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 element-wise maximum: output[i] = max(input_a[i], input_b[i]).
// WAsm SIMD variant using wasm_f32x4_max (the "arm"-tuned flavor), processing
// 16 elements (four 128-bit vectors) per main-loop iteration.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// params  - default microkernel parameters; not read by this kernel.
// NOTE: the remainder path issues full 128-bit loads even when fewer than
// 4 floats remain, hence XNN_OOB_READS on the declaration.
void xnn_f32_vmax_ukernel__wasmsimd_arm_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 16 elements (4 vectors) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_max(va0, vb0);
    v128_t vacc1 = wasm_f32x4_max(va1, vb1);
    v128_t vacc2 = wasm_f32x4_max(va2, vb2);
    v128_t vacc3 = wasm_f32x4_max(va3, vb3);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one 4-element vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_max(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Remainder: 1-3 elements; store two lanes, then one, as batch bits dictate.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_max(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,369 | 25.931818 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 element-wise maximum: output[i] = max(input_a[i], input_b[i]).
// WAsm SIMD variant using wasm_f32x4_max (the "arm"-tuned flavor), processing
// one 128-bit vector (4 elements) per main-loop iteration.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// params  - default microkernel parameters; not read by this kernel.
// NOTE: the remainder path issues full 128-bit loads even when fewer than
// 4 floats remain, hence XNN_OOB_READS on the declaration.
void xnn_f32_vmax_ukernel__wasmsimd_arm_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one 4-element vector per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_max(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Remainder: 1-3 elements; store two lanes, then one, as batch bits dictate.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_max(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,487 | 23 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasmsimd-arm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 element-wise maximum: output[i] = max(input_a[i], input_b[i]).
// WAsm SIMD variant using wasm_f32x4_max (the "arm"-tuned flavor), processing
// 8 elements (two 128-bit vectors) per main-loop iteration.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// params  - default microkernel parameters; not read by this kernel.
// NOTE: the remainder path issues full 128-bit loads even when fewer than
// 4 floats remain, hence XNN_OOB_READS on the declaration.
void xnn_f32_vmax_ukernel__wasmsimd_arm_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 8 elements (2 vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_max(va0, vb0);
    v128_t vacc1 = wasm_f32x4_max(va1, vb1);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one 4-element vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_max(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Remainder: 1-3 elements; store two lanes, then one, as batch bits dictate.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_max(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,982 | 23.7875 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasmsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 element-wise maximum, "x86"-tuned WAsm SIMD flavor: uses
// wasm_f32x4_pmax instead of wasm_f32x4_max. Processes 16 elements
// (four 128-bit vectors) per main-loop iteration.
// NOTE(review): f32x4.pmax is the pseudo-maximum (b < a ? a : b), which can
// differ from f32x4.max for NaN and signed-zero inputs — presumably chosen
// because it lowers to a single instruction on x86.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// params  - default microkernel parameters; not read by this kernel.
// NOTE: the remainder path issues full 128-bit loads even when fewer than
// 4 floats remain, hence XNN_OOB_READS on the declaration.
void xnn_f32_vmax_ukernel__wasmsimd_x86_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 16 elements (4 vectors) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    const v128_t va2 = wasm_v128_load(input_a + 8);
    const v128_t va3 = wasm_v128_load(input_a + 12);
    input_a += 16;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    const v128_t vb2 = wasm_v128_load(input_b + 8);
    const v128_t vb3 = wasm_v128_load(input_b + 12);
    input_b += 16;
    v128_t vacc0 = wasm_f32x4_pmax(va0, vb0);
    v128_t vacc1 = wasm_f32x4_pmax(va1, vb1);
    v128_t vacc2 = wasm_f32x4_pmax(va2, vb2);
    v128_t vacc3 = wasm_f32x4_pmax(va3, vb3);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    wasm_v128_store(output + 8, vacc2);
    wasm_v128_store(output + 12, vacc3);
    output += 16;
  }
  // Secondary loop: one 4-element vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_pmax(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Remainder: 1-3 elements; store two lanes, then one, as batch bits dictate.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_pmax(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 2,375 | 26 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 element-wise maximum, "x86"-tuned WAsm SIMD flavor: uses
// wasm_f32x4_pmax instead of wasm_f32x4_max. Processes one 128-bit vector
// (4 elements) per main-loop iteration.
// NOTE(review): f32x4.pmax is the pseudo-maximum (b < a ? a : b), which can
// differ from f32x4.max for NaN and signed-zero inputs — presumably chosen
// because it lowers to a single instruction on x86.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// params  - default microkernel parameters; not read by this kernel.
// NOTE: the remainder path issues full 128-bit loads even when fewer than
// 4 floats remain, hence XNN_OOB_READS on the declaration.
void xnn_f32_vmax_ukernel__wasmsimd_x86_x4(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: one 4-element vector per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_pmax(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Remainder: 1-3 elements; store two lanes, then one, as batch bits dictate.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_pmax(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,489 | 23.032258 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmax-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 element-wise maximum, "x86"-tuned WAsm SIMD flavor: uses
// wasm_f32x4_pmax instead of wasm_f32x4_max. Processes 8 elements
// (two 128-bit vectors) per main-loop iteration.
// NOTE(review): f32x4.pmax is the pseudo-maximum (b < a ? a : b), which can
// differ from f32x4.max for NaN and signed-zero inputs — presumably chosen
// because it lowers to a single instruction on x86.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// params  - default microkernel parameters; not read by this kernel.
// NOTE: the remainder path issues full 128-bit loads even when fewer than
// 4 floats remain, hence XNN_OOB_READS on the declaration.
void xnn_f32_vmax_ukernel__wasmsimd_x86_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  // Main loop: 8 elements (2 vectors) per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t va0 = wasm_v128_load(input_a);
    const v128_t va1 = wasm_v128_load(input_a + 4);
    input_a += 8;
    const v128_t vb0 = wasm_v128_load(input_b);
    const v128_t vb1 = wasm_v128_load(input_b + 4);
    input_b += 8;
    v128_t vacc0 = wasm_f32x4_pmax(va0, vb0);
    v128_t vacc1 = wasm_f32x4_pmax(va1, vb1);
    wasm_v128_store(output, vacc0);
    wasm_v128_store(output + 4, vacc1);
    output += 8;
  }
  // Secondary loop: one 4-element vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t va = wasm_v128_load(input_a);
    input_a += 4;
    const v128_t vb = wasm_v128_load(input_b);
    input_b += 4;
    v128_t vacc = wasm_f32x4_pmax(va, vb);
    wasm_v128_store(output, vacc);
    output += 4;
  }
  // Remainder: 1-3 elements; store two lanes, then one, as batch bits dictate.
  if XNN_UNLIKELY(batch != 0) {
    const v128_t va = wasm_v128_load(input_a);
    const v128_t vb = wasm_v128_load(input_b);
    v128_t vacc = wasm_f32x4_pmax(va, vb);
    if (batch & (2 * sizeof(float))) {
      wasm_v128_store64_lane(output, vacc, 0);
      vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vacc, 0);
    }
  }
}
| 1,986 | 23.8375 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 maximum against a broadcast scalar (vmaxc):
// output[i] = max(input_a[i], *input_b). AVX implementation, 16 elements
// (two 256-bit vectors) per main-loop iteration.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// input_a - input array.
// input_b - pointer to the single scalar operand, broadcast to all lanes.
// output  - destination array.
// params  - provides avx.mask_table, used for the masked remainder load.
void xnn_f32_vmaxc_ukernel__avx_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m256 vb = _mm256_broadcast_ss(input_b);
  // Main loop: 16 elements (2 vectors) per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m256 vacc0 = _mm256_loadu_ps(input_a);
    __m256 vacc1 = _mm256_loadu_ps(input_a + 8);
    input_a += 16;
    vacc0 = _mm256_max_ps(vacc0, vb);
    vacc1 = _mm256_max_ps(vacc1, vb);
    _mm256_storeu_ps(output, vacc0);
    _mm256_storeu_ps(output + 8, vacc1);
    output += 16;
  }
  // Secondary loop: one 8-element vector at a time.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_max_ps(vacc, vb);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Fix: restore '&params' (the source had been corrupted to '¶ms' by a
    // bad character-encoding round-trip, which does not compile). The mask
    // selects the low batch/sizeof(float) lanes for a safe partial load.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    vacc = _mm256_max_ps(vacc, vb);
    // Store the 1-7 remaining results: 4 lanes, then 2, then 1.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 2,131 | 25.65 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
// f32 maximum against a broadcast scalar (vmaxc):
// output[i] = max(input_a[i], *input_b). AVX implementation, 8 elements
// (one 256-bit vector) per main-loop iteration.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// input_a - input array.
// input_b - pointer to the single scalar operand, broadcast to all lanes.
// output  - destination array.
// params  - provides avx.mask_table, used for the masked remainder load.
void xnn_f32_vmaxc_ukernel__avx_x8(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m256 vb = _mm256_broadcast_ss(input_b);
  // Main loop: one 8-element vector per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    __m256 vacc = _mm256_loadu_ps(input_a);
    input_a += 8;
    vacc = _mm256_max_ps(vacc, vb);
    _mm256_storeu_ps(output, vacc);
    output += 8;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 7 * sizeof(float));
    // Fix: restore '&params' (the source had been corrupted to '¶ms' by a
    // bad character-encoding round-trip, which does not compile). The mask
    // selects the low batch/sizeof(float) lanes for a safe partial load.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
    __m256 vacc = _mm256_maskload_ps(input_a, vmask);
    vacc = _mm256_max_ps(vacc, vb);
    // Store the 1-7 remaining results: 4 lanes, then 2, then 1.
    __m128 vacc_lo = _mm256_castps256_ps128(vacc);
    if (batch & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vacc_lo);
      vacc_lo = _mm256_extractf128_ps(vacc, 1);
      output += 4;
    }
    if (batch & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vacc_lo);
      vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
      output += 2;
    }
    if (batch & (1 * sizeof(float))) {
      _mm_store_ss(output, vacc_lo);
    }
  }
}
| 1,768 | 25.80303 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// f32 maximum against a broadcast scalar (vmaxc):
// output[i] = max(input_a[i], *input_b). AVX-512F implementation, 16 elements
// (one 512-bit vector) per main-loop iteration; the remainder uses masked
// load/compute/store, so no scalar tail loop is needed.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// input_a - input array.
// input_b - pointer to the single scalar operand, broadcast to all lanes.
// output  - destination array.
// params  - default microkernel parameters; not read by this kernel.
void xnn_f32_vmaxc_ukernel__avx512f_x16(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m512 vb = _mm512_set1_ps(*input_b);
  // Main loop: one 16-element vector per iteration.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc0 = _mm512_max_ps(vacc0, vb);
    _mm512_storeu_ps(output, vacc0);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    // After the shift, batch holds the element count; the mask has that
    // many low bits set.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_max_ps(vmask, vacc, vb);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 1,541 | 26.052632 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vmaxc-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
// f32 maximum against a broadcast scalar (vmaxc):
// output[i] = max(input_a[i], *input_b). AVX-512F implementation, 32 elements
// (two 512-bit vectors) per main-loop iteration; the remainder uses masked
// load/compute/store, so no scalar tail loop is needed.
//
// batch   - number of input bytes to process; non-zero multiple of sizeof(float).
// input_a - input array.
// input_b - pointer to the single scalar operand, broadcast to all lanes.
// output  - destination array.
// params  - default microkernel parameters; not read by this kernel.
void xnn_f32_vmaxc_ukernel__avx512f_x32(
    size_t batch,
    const float* input_a,
    const float* input_b,
    float* output,
    const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input_a != NULL);
  assert(input_b != NULL);
  assert(output != NULL);
  const __m512 vb = _mm512_set1_ps(*input_b);
  // Main loop: 32 elements (2 vectors) per iteration.
  for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
    __m512 vacc0 = _mm512_loadu_ps(input_a);
    __m512 vacc1 = _mm512_loadu_ps(input_a + 16);
    input_a += 32;
    vacc0 = _mm512_max_ps(vacc0, vb);
    vacc1 = _mm512_max_ps(vacc1, vb);
    _mm512_storeu_ps(output, vacc0);
    _mm512_storeu_ps(output + 16, vacc1);
    output += 32;
  }
  // Secondary loop: one 16-element vector at a time.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    __m512 vacc = _mm512_loadu_ps(input_a);
    input_a += 16;
    vacc = _mm512_max_ps(vacc, vb);
    _mm512_storeu_ps(output, vacc);
    output += 16;
  }
  if XNN_UNLIKELY(batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on batch).
    // After the shift, batch holds the element count; the mask has that
    // many low bits set.
    batch >>= XNN_LOG2_SIZEOF_FLOAT;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
    __m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
    vacc = _mm512_maskz_max_ps(vmask, vacc, vb);
    _mm512_mask_storeu_ps(output, vmask, vacc);
  }
}
| 1,899 | 26.536232 | 105 |
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.