repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes)
---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x2-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
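// Note added for clarity (inferred from the kernel body below, not from the
// generator template, so treat it as a hedged reading):
//   mc            - size of the dense (M) dimension in bytes, a multiple of
//                   sizeof(float); 8 rows ("8x") are produced per outer iteration.
//   nc            - number of output channels, processed two at a time ("x2"),
//                   with a one-channel tail for an odd remainder.
//   weights       - packed stream consumed in order via *w++: per channel pair,
//                   two biases followed by two weights per nonzero.
//   widx_dmap     - byte deltas added to the input pointer after each nonzero
//                   is consumed.
//   nidx_nnzmap   - one nonzero count per iteration of the channel loop.
//   output_stride - byte stride between consecutive output channels.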
void xnn_f32_spmm_minmax_ukernel_8x2__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while (mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc0x1 = *w++;
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc2x0 = vacc0x0;
float vacc2x1 = vacc0x1;
float vacc3x0 = vacc0x0;
float vacc3x1 = vacc0x1;
float vacc4x0 = vacc0x0;
float vacc4x1 = vacc0x1;
float vacc5x0 = vacc0x0;
float vacc5x1 = vacc0x1;
float vacc6x0 = vacc0x0;
float vacc6x1 = vacc0x1;
float vacc7x0 = vacc0x0;
float vacc7x1 = vacc0x1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
const float vw1 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
vacc4x0 += vi4 * vw0;
vacc5x0 += vi5 * vw0;
vacc6x0 += vi6 * vw0;
vacc7x0 += vi7 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
vacc2x1 += vi2 * vw1;
vacc3x1 += vi3 * vw1;
vacc4x1 += vi4 * vw1;
vacc5x1 += vi5 * vw1;
vacc6x1 += vi6 * vw1;
vacc7x1 += vi7 * vw1;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
float vout4x0 = math_min_f32(vacc4x0, vmax);
float vout5x0 = math_min_f32(vacc5x0, vmax);
float vout6x0 = math_min_f32(vacc6x0, vmax);
float vout7x0 = math_min_f32(vacc7x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
float vout2x1 = math_min_f32(vacc2x1, vmax);
float vout3x1 = math_min_f32(vacc3x1, vmax);
float vout4x1 = math_min_f32(vacc4x1, vmax);
float vout5x1 = math_min_f32(vacc5x1, vmax);
float vout6x1 = math_min_f32(vacc6x1, vmax);
float vout7x1 = math_min_f32(vacc7x1, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
vout4x0 = math_max_f32(vout4x0, vmin);
vout5x0 = math_max_f32(vout5x0, vmin);
vout6x0 = math_max_f32(vout6x0, vmin);
vout7x0 = math_max_f32(vout7x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
vout2x1 = math_max_f32(vout2x1, vmin);
vout3x1 = math_max_f32(vout3x1, vmin);
vout4x1 = math_max_f32(vout4x1, vmin);
vout5x1 = math_max_f32(vout5x1, vmin);
vout6x1 = math_max_f32(vout6x1, vmin);
vout7x1 = math_max_f32(vout7x1, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output[4] = vout4x0;
output[5] = vout5x0;
output[6] = vout6x0;
output[7] = vout7x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output[2] = vout2x1;
output[3] = vout3x1;
output[4] = vout4x1;
output[5] = vout5x1;
output[6] = vout6x1;
output[7] = vout7x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 2;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
float vacc4 = vacc0;
float vacc5 = vacc0;
float vacc6 = vacc0;
float vacc7 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
vacc4 += vi4 * vw;
vacc5 += vi5 * vw;
vacc6 += vi6 * vw;
vacc7 += vi7 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
float vout4 = math_min_f32(vacc4, vmax);
float vout5 = math_min_f32(vacc5, vmax);
float vout6 = math_min_f32(vacc6, vmax);
float vout7 = math_min_f32(vacc7, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
vout4 = math_max_f32(vout4, vmin);
vout5 = math_max_f32(vout5, vmin);
vout6 = math_max_f32(vout6, vmin);
vout7 = math_max_f32(vout7, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output[4] = vout4;
output[5] = vout5;
output[6] = vout6;
output[7] = vout7;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
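// Remainder handling for the M dimension: the same computation repeated for
// tails of 4, 2, and 1 rows, selected by testing individual bits of mc.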
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc0x1 = *w++;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc2x1 = vacc0x1;
float vacc3x1 = vacc0x1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
const float vw1 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
vacc2x1 += vi2 * vw1;
vacc3x1 += vi3 * vw1;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
float vout2x1 = math_min_f32(vacc2x1, vmax);
float vout3x1 = math_min_f32(vacc3x1, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
vout2x1 = math_max_f32(vout2x1, vmin);
vout3x1 = math_max_f32(vout3x1, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output[2] = vout2x1;
output[3] = vout3x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 2;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc0x1 = *w++;
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
const float vw1 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 2;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc0x1 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
const float vw1 = *w++;
vacc0x0 += vi0 * vw0;
vacc0x1 += vi0 * vw1;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 2;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 15,892 | 34.475446 | 84 |
c
|
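A minimal hand-worked driver for the scalar 8x2 kernel above may help make the packed layout concrete. Everything here is inferred from the kernel body alone, not from XNNPACK's packing routines, so the buffer layouts (two biases per channel pair, two weights per nonzero, dmap entries as byte deltas applied to the input pointer after each read) are illustrative assumptions rather than the official packed format; the sketch also assumes it is compiled inside the XNNPACK source tree so that <xnnpack/spmm.h> provides the kernel prototype and the params type.

// Toy problem: M = 8, N = 2, K = 3; both output channels have nonzeros at
// k = 0 and k = 2 (the 8x2 kernel reads one nnz count and one dmap stream per
// channel pair, so paired channels share a sparsity pattern in this layout).
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <xnnpack/spmm.h>

int main(void) {
  float input[3 * 8];  // dense input: K rows of M contiguous floats
  for (int i = 0; i < 3 * 8; i++) {
    input[i] = (float) i;
  }
  // Packed weights, consumed strictly in order by the kernel:
  // bias(ch0), bias(ch1), then per nonzero: w(ch0), w(ch1).
  const float weights[] = {
    0.5f, -0.5f,  // biases
    1.0f,  2.0f,  // weights at k = 0
    3.0f,  4.0f,  // weights at k = 2
  };
  // Byte deltas applied to the input pointer after each nonzero is read:
  // +64 moves from row k = 0 to row k = 2, -64 wraps back to the base.
  const int32_t widx_dmap[] = {
    +2 * 8 * (int32_t) sizeof(float),
    -2 * 8 * (int32_t) sizeof(float),
  };
  const uint32_t nidx_nnzmap[] = { 2 };  // two nonzeros for the channel pair

  float output[2 * 8];  // N rows of M floats, row stride = output_stride
  union xnn_f32_minmax_params params;
  params.scalar.min = -INFINITY;  // field names as used by the kernel itself
  params.scalar.max = +INFINITY;

  xnn_f32_spmm_minmax_ukernel_8x2__scalar(
    8 * sizeof(float),   // mc: M dimension, in bytes
    2,                   // nc: number of output channels
    input, weights, widx_dmap, nidx_nnzmap,
    output,
    8 * sizeof(float),   // output_stride: bytes between channel rows
    &params);

  // Expected: output[n][m] = bias_n + w_n(k=0) * input[0][m] + w_n(k=2) * input[2][m],
  // e.g. output[0] = 0.5 + 1*0 + 3*16 = 48.5 and output[8] = -0.5 + 2*0 + 4*16 = 63.5.
  printf("%.1f %.1f\n", output[0], output[8]);
  return 0;
}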
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-arm-pipelined-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_pipelined_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
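// Software pipelining (hedged reading of the structure below): the first
// weight, input-pointer delta, and input vectors are preloaded before the
// channel loop, and each iteration consumes the previously loaded values
// while fetching the ones needed next.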
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
vw = wasm_v128_load32_splat(w); w += 1;
for (; nnz >= 2; nnz -= 2) {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
}
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 6,555 | 36.462857 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-arm-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
vw = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,731 | 35.050314 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-arm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
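// Unrolled-by-2 main loop: two independent accumulator sets (x0 and x1
// suffixes) keep the two multiply-adds per iteration off a single dependency
// chain; they are summed into one accumulator after the loop.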
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 6,872 | 37.830508 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm_x4(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x3 = wasm_f32x4_const_splat(0.0f);
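// Unrolled-by-4 main loop: four independent accumulator sets per register
// group, reduced into a single accumulator after the loop, to break up the
// add/mul dependency chain.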
for (; nnz >= 4; nnz -= 4) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
const intptr_t diff2 = dmap[2];
const intptr_t diff3 = dmap[3];
dmap += 4;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
const v128_t vi0123x2 = wasm_v128_load(input);
const v128_t vi4567x2 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
const v128_t vw2 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
const v128_t vi0123x3 = wasm_v128_load(input);
const v128_t vi4567x3 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
const v128_t vw3 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 8,244 | 40.0199 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-arm.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_arm(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_min(vmax, vacc4567);
vout0123 = wasm_f32x4_max(vmin, vout0123);
vout4567 = wasm_f32x4_max(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_min(vmax, vacc0123);
vout0123 = wasm_f32x4_max(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_min(vmax, vacc01);
vout01 = wasm_f32x4_max(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_min(vmax, vacc0);
vout0 = wasm_f32x4_max(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,573 | 35.913907 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-x86-pipelined-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
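// Note added for clarity: the _x86_ variants in this family clamp the output
// with wasm_f32x4_pmin/pmax where the _arm_ variants above use
// wasm_f32x4_min/max; pmin/pmax have simpler NaN/zero semantics that map more
// directly to x86 minps/maxps, which is presumably why the generator emits
// them for x86 targets.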
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_pipelined_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
vw = wasm_v128_load32_splat(w); w += 1;
for (; nnz >= 2; nnz -= 2) {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
}
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 6,565 | 36.52 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-x86-pipelined.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd-pipelined.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_pipelined(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
v128_t vw = wasm_v128_load32_splat(w); w += 1;
intptr_t diff = *dmap++;
v128_t vi0123 = wasm_v128_load(input + 0);
v128_t vi4567 = wasm_v128_load(input + 4);
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = vw;
v128_t vacc4567 = vw;
vw = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = wasm_v128_load32_splat(w); w += 1;
vi0123 = wasm_v128_load(input + 0);
vi4567 = wasm_v128_load(input + 4);
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,741 | 35.113208 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-x86-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_x2(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
for (; nnz >= 2; nnz -= 2) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
dmap += 2;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
}
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 6,882 | 37.887006 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86_x4(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123x0 = wasm_v128_load32_splat(w);
w += 1;
v128_t vacc0123x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc0123x3 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x0 = vacc0123x0;
v128_t vacc4567x1 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x2 = wasm_f32x4_const_splat(0.0f);
v128_t vacc4567x3 = wasm_f32x4_const_splat(0.0f);
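      // Process nonzero weights in groups of 4, keeping independent accumulators to shorten the dependency chain of the adds.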
for (; nnz >= 4; nnz -= 4) {
const intptr_t diff0 = dmap[0];
const intptr_t diff1 = dmap[1];
const intptr_t diff2 = dmap[2];
const intptr_t diff3 = dmap[3];
dmap += 4;
const v128_t vi0123x0 = wasm_v128_load(input);
const v128_t vi4567x0 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff0);
const v128_t vw0 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x0 = wasm_f32x4_add(vacc0123x0, wasm_f32x4_mul(vi0123x0, vw0));
vacc4567x0 = wasm_f32x4_add(vacc4567x0, wasm_f32x4_mul(vi4567x0, vw0));
const v128_t vi0123x1 = wasm_v128_load(input);
const v128_t vi4567x1 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff1);
const v128_t vw1 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x1 = wasm_f32x4_add(vacc0123x1, wasm_f32x4_mul(vi0123x1, vw1));
vacc4567x1 = wasm_f32x4_add(vacc4567x1, wasm_f32x4_mul(vi4567x1, vw1));
const v128_t vi0123x2 = wasm_v128_load(input);
const v128_t vi4567x2 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff2);
const v128_t vw2 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x2 = wasm_f32x4_add(vacc0123x2, wasm_f32x4_mul(vi0123x2, vw2));
vacc4567x2 = wasm_f32x4_add(vacc4567x2, wasm_f32x4_mul(vi4567x2, vw2));
const v128_t vi0123x3 = wasm_v128_load(input);
const v128_t vi4567x3 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff3);
const v128_t vw3 = wasm_v128_load32_splat(w);
w += 1;
vacc0123x3 = wasm_f32x4_add(vacc0123x3, wasm_f32x4_mul(vi0123x3, vw3));
vacc4567x3 = wasm_f32x4_add(vacc4567x3, wasm_f32x4_mul(vi4567x3, vw3));
}
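      // Reduce the four partial accumulators into a single accumulator per row group.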
v128_t vacc0123 = vacc0123x0;
v128_t vacc4567 = vacc4567x0;
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x1);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x1);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x2);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x2);
vacc0123 = wasm_f32x4_add(vacc0123, vacc0123x3);
vacc4567 = wasm_f32x4_add(vacc4567, vacc4567x3);
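      // Handle the remaining (nnz % 4) nonzero weights one at a time.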
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 8,254 | 40.069652 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x1-minmax-wasmsimd-x86.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x1__wasmsimd_x86(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
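  // output_decrement rewinds the output pointer so that, after all nc channels of a row block are written, it points at the next 8 rows of the first channel.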
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
v128_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
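          // Each dmap entry is the byte offset to advance the input pointer to the element needed by the next nonzero weight.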
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
const v128_t vi4567 = wasm_v128_load(input + 4);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
vacc4567 = wasm_f32x4_add(vacc4567, wasm_f32x4_mul(vi4567, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
v128_t vout4567 = wasm_f32x4_pmin(vmax, vacc4567);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
vout4567 = wasm_f32x4_pmax(vmin, vout4567);
wasm_v128_store(output, vout0123);
wasm_v128_store(output + 4, vout4567);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0123 = wasm_v128_load(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0123 = wasm_f32x4_add(vacc0123, wasm_f32x4_mul(vi0123, vw));
} while (--nnz != 0);
}
v128_t vout0123 = wasm_f32x4_pmin(vmax, vacc0123);
vout0123 = wasm_f32x4_pmax(vmin, vout0123);
wasm_v128_store(output, vout0123);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi01 = wasm_v128_load64_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc01 = wasm_f32x4_add(vacc01, wasm_f32x4_mul(vi01, vw));
} while (--nnz != 0);
}
v128_t vout01 = wasm_f32x4_pmin(vmax, vacc01);
vout01 = wasm_f32x4_pmax(vmin, vout01);
wasm_v128_store64_lane(output, vout01, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const v128_t vi0 = wasm_v128_load32_splat(input);
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const v128_t vw = wasm_v128_load32_splat(w); w += 1;
vacc0 = wasm_f32x4_add(vacc0, wasm_f32x4_mul(vi0, vw));
} while (--nnz != 0);
}
v128_t vout0 = wasm_f32x4_pmin(vmax, vacc0);
vout0 = wasm_f32x4_pmax(vmin, vout0);
wasm_v128_store32_lane(output, vout0, 0);
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 5,583 | 35.980132 | 82 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x2-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x2__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
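  // Load the clamping bounds: vld2 de-interleaves {min, max}, splatting min into val[0] and max into val[1].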
#if XNN_ARCH_ARM64
    const float32x4x2_t vminmax = vld2q_dup_f32(&params->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
    const float32x2x2_t vminmax = vld2_dup_f32(&params->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
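          // Prefetch the next input rows and weights to hide memory latency.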
xnn_prefetch_to_l1(input + 16);
const float32x2_t vw = vld1_f32(w); w += 2;
xnn_prefetch_to_l1(w + 32);
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_lane_f32(vacc4567n0, vi4567, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_lane_f32(vacc4567n1, vi4567, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0123n0 = vfmaq_lane_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_lane_f32(vacc0123n1, vi0123, vw, 1);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc01n0 = vfma_lane_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_lane_f32(vacc01n1, vi01, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 2) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_f32(w); w += 2;
vacc0n0 = vfma_lane_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_lane_f32(vacc0n1, vi0, vw, 1);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 2;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
          vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 11,269 | 35.95082 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x4-minmax-aarch64-neonfma.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/neon-blocked.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/prefetch.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x4__aarch64_neonfma(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
#if XNN_ARCH_ARM64
    const float32x4x2_t vminmax = vld2q_dup_f32(&params->scalar.min);
const float32x4_t vmin = vminmax.val[0];
const float32x4_t vmax = vminmax.val[1];
#else
    const float32x2x2_t vminmax = vld2_dup_f32(&params->scalar.min);
const float32x4_t vmin = vcombine_f32(vminmax.val[0], vminmax.val[0]);
const float32x4_t vmax = vcombine_f32(vminmax.val[1], vminmax.val[1]);
#endif
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while XNN_LIKELY(mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n0 = vacc0123n0;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n1 = vacc0123n1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n2 = vacc0123n2;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567n3 = vacc0123n3;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_f32(w); w += 4;
xnn_prefetch_to_l1(w + 32);
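          // One lane of vw supplies the weight for each of the 4 output channels.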
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc4567n0 = vfmaq_laneq_f32(vacc4567n0, vi4567, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc4567n1 = vfmaq_laneq_f32(vacc4567n1, vi4567, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc4567n2 = vfmaq_laneq_f32(vacc4567n2, vi4567, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
vacc4567n3 = vfmaq_laneq_f32(vacc4567n3, vi4567, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout4567n0 = vminq_f32(vacc4567n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout4567n1 = vminq_f32(vacc4567n1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout4567n2 = vminq_f32(vacc4567n2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
float32x4_t vout4567n3 = vminq_f32(vacc4567n3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout4567n0 = vmaxq_f32(vout4567n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout4567n1 = vmaxq_f32(vout4567n1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout4567n2 = vmaxq_f32(vout4567n2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vout4567n3 = vmaxq_f32(vout4567n3, vmin);
vst1q_f32(output + 0, vout0123n0);
vst1q_f32(output + 4, vout4567n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
vst1q_f32(output + 4, vout4567n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
vst1q_f32(output + 4, vout4567n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
vst1q_f32(output + 4, vout4567n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc4567 = vacc0123;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
const float32x4_t vi4567 = vld1q_f32(input + 4);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
xnn_prefetch_to_l1(input + 16);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
xnn_prefetch_to_l1(w + 32);
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
vacc4567 = vfmaq_f32(vacc4567, vi4567, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
float32x4_t vout4567 = vminq_f32(vacc4567, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vout4567 = vmaxq_f32(vout4567, vmin);
vst1q_f32(output + 0, vout0123);
vst1q_f32(output + 4, vout4567);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123n0 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n1 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n2 = vld1q_dup_f32(w); w += 1;
float32x4_t vacc0123n3 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0123n0 = vfmaq_laneq_f32(vacc0123n0, vi0123, vw, 0);
vacc0123n1 = vfmaq_laneq_f32(vacc0123n1, vi0123, vw, 1);
vacc0123n2 = vfmaq_laneq_f32(vacc0123n2, vi0123, vw, 2);
vacc0123n3 = vfmaq_laneq_f32(vacc0123n3, vi0123, vw, 3);
} while (--nnz != 0);
}
float32x4_t vout0123n0 = vminq_f32(vacc0123n0, vmax);
float32x4_t vout0123n1 = vminq_f32(vacc0123n1, vmax);
float32x4_t vout0123n2 = vminq_f32(vacc0123n2, vmax);
float32x4_t vout0123n3 = vminq_f32(vacc0123n3, vmax);
vout0123n0 = vmaxq_f32(vout0123n0, vmin);
vout0123n1 = vmaxq_f32(vout0123n1, vmin);
vout0123n2 = vmaxq_f32(vout0123n2, vmin);
vout0123n3 = vmaxq_f32(vout0123n3, vmin);
vst1q_f32(output + 0, vout0123n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1q_f32(output + 0, vout0123n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x4_t vacc0123 = vld1q_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x4_t vi0123 = vld1q_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_dup_f32(w); w += 1;
vacc0123 = vfmaq_f32(vacc0123, vi0123, vw);
} while (--nnz != 0);
}
float32x4_t vout0123 = vminq_f32(vacc0123, vmax);
vout0123 = vmaxq_f32(vout0123, vmin);
vst1q_f32(output + 0, vout0123);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n1 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n2 = vld1_dup_f32(w); w += 1;
float32x2_t vacc01n3 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc01n0 = vfma_laneq_f32(vacc01n0, vi01, vw, 0);
vacc01n1 = vfma_laneq_f32(vacc01n1, vi01, vw, 1);
vacc01n2 = vfma_laneq_f32(vacc01n2, vi01, vw, 2);
vacc01n3 = vfma_laneq_f32(vacc01n3, vi01, vw, 3);
} while (--nnz != 0);
}
float32x2_t vout01n0 = vmin_f32(vacc01n0, vget_low_f32(vmax));
float32x2_t vout01n1 = vmin_f32(vacc01n1, vget_low_f32(vmax));
float32x2_t vout01n2 = vmin_f32(vacc01n2, vget_low_f32(vmax));
float32x2_t vout01n3 = vmin_f32(vacc01n3, vget_low_f32(vmax));
vout01n0 = vmax_f32(vout01n0, vget_low_f32(vmin));
vout01n1 = vmax_f32(vout01n1, vget_low_f32(vmin));
vout01n2 = vmax_f32(vout01n2, vget_low_f32(vmin));
vout01n3 = vmax_f32(vout01n3, vget_low_f32(vmin));
vst1_f32(output + 0, vout01n0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n1);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n2);
output = (float*) ((uintptr_t) output + output_stride);
vst1_f32(output + 0, vout01n3);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc01 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi01 = vld1_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc01 = vfma_f32(vacc01, vi01, vw);
} while (--nnz != 0);
}
float32x2_t vout01 = vmin_f32(vacc01, vget_low_f32(vmax));
vout01 = vmax_f32(vout01, vget_low_f32(vmin));
vst1_f32(output, vout01);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0n0 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n1 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n2 = vld1_dup_f32(w); w += 1;
float32x2_t vacc0n3 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x4_t vw = vld1q_f32(w); w += 4;
vacc0n0 = vfma_laneq_f32(vacc0n0, vi0, vw, 0);
vacc0n1 = vfma_laneq_f32(vacc0n1, vi0, vw, 1);
vacc0n2 = vfma_laneq_f32(vacc0n2, vi0, vw, 2);
vacc0n3 = vfma_laneq_f32(vacc0n3, vi0, vw, 3);
} while (--nnz != 0);
}
float32x2_t vout0n0 = vmin_f32(vacc0n0, vget_low_f32(vmax));
float32x2_t vout0n1 = vmin_f32(vacc0n1, vget_low_f32(vmax));
float32x2_t vout0n2 = vmin_f32(vacc0n2, vget_low_f32(vmax));
float32x2_t vout0n3 = vmin_f32(vacc0n3, vget_low_f32(vmax));
vout0n0 = vmax_f32(vout0n0, vget_low_f32(vmin));
vout0n1 = vmax_f32(vout0n1, vget_low_f32(vmin));
vout0n2 = vmax_f32(vout0n2, vget_low_f32(vmin));
vout0n3 = vmax_f32(vout0n3, vget_low_f32(vmin));
vst1_lane_f32(output + 0, vout0n0, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n1, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n2, 0);
output = (float*) ((uintptr_t) output + output_stride);
vst1_lane_f32(output + 0, vout0n3, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 4;
}
// clean up loop, fall back to nr=1
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float32x2_t vacc0 = vld1_dup_f32(w); w += 1;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float32x2_t vi0 = vld1_dup_f32(input);
input = (const float*) ((uintptr_t) input + (uintptr_t) diff);
const float32x2_t vw = vld1_dup_f32(w); w += 1;
vacc0 = vfma_f32(vacc0, vi0, vw);
} while (--nnz != 0);
}
float32x2_t vout0 = vmin_f32(vacc0, vget_low_f32(vmax));
vout0 = vmax_f32(vout0, vget_low_f32(vmin));
          vst1_lane_f32(output, vout0, 0);
output = (float*) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 14,571 | 39.143251 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-spmm/gen/f32-spmm-8x4-minmax-scalar.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-spmm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
void xnn_f32_spmm_minmax_ukernel_8x4__scalar(
size_t mc,
size_t nc,
const float* input,
const float* weights,
const int32_t* widx_dmap,
const uint32_t* nidx_nnzmap,
float* output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
size_t output_decrement = output_stride * nc - 8 * sizeof(float);
while (mc >= 8 * sizeof(float)) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc0x1 = *w++;
float vacc0x2 = *w++;
float vacc0x3 = *w++;
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc1x2 = vacc0x2;
float vacc1x3 = vacc0x3;
float vacc2x0 = vacc0x0;
float vacc2x1 = vacc0x1;
float vacc2x2 = vacc0x2;
float vacc2x3 = vacc0x3;
float vacc3x0 = vacc0x0;
float vacc3x1 = vacc0x1;
float vacc3x2 = vacc0x2;
float vacc3x3 = vacc0x3;
float vacc4x0 = vacc0x0;
float vacc4x1 = vacc0x1;
float vacc4x2 = vacc0x2;
float vacc4x3 = vacc0x3;
float vacc5x0 = vacc0x0;
float vacc5x1 = vacc0x1;
float vacc5x2 = vacc0x2;
float vacc5x3 = vacc0x3;
float vacc6x0 = vacc0x0;
float vacc6x1 = vacc0x1;
float vacc6x2 = vacc0x2;
float vacc6x3 = vacc0x3;
float vacc7x0 = vacc0x0;
float vacc7x1 = vacc0x1;
float vacc7x2 = vacc0x2;
float vacc7x3 = vacc0x3;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
const float vw1 = *w++;
const float vw2 = *w++;
const float vw3 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
vacc4x0 += vi4 * vw0;
vacc5x0 += vi5 * vw0;
vacc6x0 += vi6 * vw0;
vacc7x0 += vi7 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
vacc2x1 += vi2 * vw1;
vacc3x1 += vi3 * vw1;
vacc4x1 += vi4 * vw1;
vacc5x1 += vi5 * vw1;
vacc6x1 += vi6 * vw1;
vacc7x1 += vi7 * vw1;
vacc0x2 += vi0 * vw2;
vacc1x2 += vi1 * vw2;
vacc2x2 += vi2 * vw2;
vacc3x2 += vi3 * vw2;
vacc4x2 += vi4 * vw2;
vacc5x2 += vi5 * vw2;
vacc6x2 += vi6 * vw2;
vacc7x2 += vi7 * vw2;
vacc0x3 += vi0 * vw3;
vacc1x3 += vi1 * vw3;
vacc2x3 += vi2 * vw3;
vacc3x3 += vi3 * vw3;
vacc4x3 += vi4 * vw3;
vacc5x3 += vi5 * vw3;
vacc6x3 += vi6 * vw3;
vacc7x3 += vi7 * vw3;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
float vout4x0 = math_min_f32(vacc4x0, vmax);
float vout5x0 = math_min_f32(vacc5x0, vmax);
float vout6x0 = math_min_f32(vacc6x0, vmax);
float vout7x0 = math_min_f32(vacc7x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
float vout2x1 = math_min_f32(vacc2x1, vmax);
float vout3x1 = math_min_f32(vacc3x1, vmax);
float vout4x1 = math_min_f32(vacc4x1, vmax);
float vout5x1 = math_min_f32(vacc5x1, vmax);
float vout6x1 = math_min_f32(vacc6x1, vmax);
float vout7x1 = math_min_f32(vacc7x1, vmax);
float vout0x2 = math_min_f32(vacc0x2, vmax);
float vout1x2 = math_min_f32(vacc1x2, vmax);
float vout2x2 = math_min_f32(vacc2x2, vmax);
float vout3x2 = math_min_f32(vacc3x2, vmax);
float vout4x2 = math_min_f32(vacc4x2, vmax);
float vout5x2 = math_min_f32(vacc5x2, vmax);
float vout6x2 = math_min_f32(vacc6x2, vmax);
float vout7x2 = math_min_f32(vacc7x2, vmax);
float vout0x3 = math_min_f32(vacc0x3, vmax);
float vout1x3 = math_min_f32(vacc1x3, vmax);
float vout2x3 = math_min_f32(vacc2x3, vmax);
float vout3x3 = math_min_f32(vacc3x3, vmax);
float vout4x3 = math_min_f32(vacc4x3, vmax);
float vout5x3 = math_min_f32(vacc5x3, vmax);
float vout6x3 = math_min_f32(vacc6x3, vmax);
float vout7x3 = math_min_f32(vacc7x3, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
vout4x0 = math_max_f32(vout4x0, vmin);
vout5x0 = math_max_f32(vout5x0, vmin);
vout6x0 = math_max_f32(vout6x0, vmin);
vout7x0 = math_max_f32(vout7x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
vout2x1 = math_max_f32(vout2x1, vmin);
vout3x1 = math_max_f32(vout3x1, vmin);
vout4x1 = math_max_f32(vout4x1, vmin);
vout5x1 = math_max_f32(vout5x1, vmin);
vout6x1 = math_max_f32(vout6x1, vmin);
vout7x1 = math_max_f32(vout7x1, vmin);
vout0x2 = math_max_f32(vout0x2, vmin);
vout1x2 = math_max_f32(vout1x2, vmin);
vout2x2 = math_max_f32(vout2x2, vmin);
vout3x2 = math_max_f32(vout3x2, vmin);
vout4x2 = math_max_f32(vout4x2, vmin);
vout5x2 = math_max_f32(vout5x2, vmin);
vout6x2 = math_max_f32(vout6x2, vmin);
vout7x2 = math_max_f32(vout7x2, vmin);
vout0x3 = math_max_f32(vout0x3, vmin);
vout1x3 = math_max_f32(vout1x3, vmin);
vout2x3 = math_max_f32(vout2x3, vmin);
vout3x3 = math_max_f32(vout3x3, vmin);
vout4x3 = math_max_f32(vout4x3, vmin);
vout5x3 = math_max_f32(vout5x3, vmin);
vout6x3 = math_max_f32(vout6x3, vmin);
vout7x3 = math_max_f32(vout7x3, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output[4] = vout4x0;
output[5] = vout5x0;
output[6] = vout6x0;
output[7] = vout7x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output[2] = vout2x1;
output[3] = vout3x1;
output[4] = vout4x1;
output[5] = vout5x1;
output[6] = vout6x1;
output[7] = vout7x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x2;
output[1] = vout1x2;
output[2] = vout2x2;
output[3] = vout3x2;
output[4] = vout4x2;
output[5] = vout5x2;
output[6] = vout6x2;
output[7] = vout7x2;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x3;
output[1] = vout1x3;
output[2] = vout2x3;
output[3] = vout3x3;
output[4] = vout4x3;
output[5] = vout5x3;
output[6] = vout6x3;
output[7] = vout7x3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 4;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
float vacc4 = vacc0;
float vacc5 = vacc0;
float vacc6 = vacc0;
float vacc7 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
const float vi4 = input[4];
const float vi5 = input[5];
const float vi6 = input[6];
const float vi7 = input[7];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
vacc4 += vi4 * vw;
vacc5 += vi5 * vw;
vacc6 += vi6 * vw;
vacc7 += vi7 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
float vout4 = math_min_f32(vacc4, vmax);
float vout5 = math_min_f32(vacc5, vmax);
float vout6 = math_min_f32(vacc6, vmax);
float vout7 = math_min_f32(vacc7, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
vout4 = math_max_f32(vout4, vmin);
vout5 = math_max_f32(vout5, vmin);
vout6 = math_max_f32(vout6, vmin);
vout7 = math_max_f32(vout7, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output[4] = vout4;
output[5] = vout5;
output[6] = vout6;
output[7] = vout7;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 8;
mc -= 8 * sizeof(float);
}
if XNN_UNLIKELY(mc != 0) {
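    // Leftover rows: process blocks of 4, 2, then 1; output_decrement is increased so the rewind matches each smaller block.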
output_decrement += 4 * sizeof(float);
if (mc & (4 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc0x1 = *w++;
float vacc0x2 = *w++;
float vacc0x3 = *w++;
float vacc1x0 = vacc0x0;
float vacc2x0 = vacc0x0;
float vacc3x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc2x1 = vacc0x1;
float vacc3x1 = vacc0x1;
float vacc1x2 = vacc0x2;
float vacc2x2 = vacc0x2;
float vacc3x2 = vacc0x2;
float vacc1x3 = vacc0x3;
float vacc2x3 = vacc0x3;
float vacc3x3 = vacc0x3;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
const float vw1 = *w++;
const float vw2 = *w++;
const float vw3 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc2x0 += vi2 * vw0;
vacc3x0 += vi3 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
vacc2x1 += vi2 * vw1;
vacc3x1 += vi3 * vw1;
vacc0x2 += vi0 * vw2;
vacc1x2 += vi1 * vw2;
vacc2x2 += vi2 * vw2;
vacc3x2 += vi3 * vw2;
vacc0x3 += vi0 * vw3;
vacc1x3 += vi1 * vw3;
vacc2x3 += vi2 * vw3;
vacc3x3 += vi3 * vw3;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout2x0 = math_min_f32(vacc2x0, vmax);
float vout3x0 = math_min_f32(vacc3x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
float vout2x1 = math_min_f32(vacc2x1, vmax);
float vout3x1 = math_min_f32(vacc3x1, vmax);
float vout0x2 = math_min_f32(vacc0x2, vmax);
float vout1x2 = math_min_f32(vacc1x2, vmax);
float vout2x2 = math_min_f32(vacc2x2, vmax);
float vout3x2 = math_min_f32(vacc3x2, vmax);
float vout0x3 = math_min_f32(vacc0x3, vmax);
float vout1x3 = math_min_f32(vacc1x3, vmax);
float vout2x3 = math_min_f32(vacc2x3, vmax);
float vout3x3 = math_min_f32(vacc3x3, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout2x0 = math_max_f32(vout2x0, vmin);
vout3x0 = math_max_f32(vout3x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
vout2x1 = math_max_f32(vout2x1, vmin);
vout3x1 = math_max_f32(vout3x1, vmin);
vout0x2 = math_max_f32(vout0x2, vmin);
vout1x2 = math_max_f32(vout1x2, vmin);
vout2x2 = math_max_f32(vout2x2, vmin);
vout3x2 = math_max_f32(vout3x2, vmin);
vout0x3 = math_max_f32(vout0x3, vmin);
vout1x3 = math_max_f32(vout1x3, vmin);
vout2x3 = math_max_f32(vout2x3, vmin);
vout3x3 = math_max_f32(vout3x3, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output[2] = vout2x0;
output[3] = vout3x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output[2] = vout2x1;
output[3] = vout3x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x2;
output[1] = vout1x2;
output[2] = vout2x2;
output[3] = vout3x2;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x3;
output[1] = vout1x3;
output[2] = vout2x3;
output[3] = vout3x3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 4;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
float vacc2 = vacc0;
float vacc3 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
const float vi2 = input[2];
const float vi3 = input[3];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
vacc2 += vi2 * vw;
vacc3 += vi3 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
float vout2 = math_min_f32(vacc2, vmax);
float vout3 = math_min_f32(vacc3, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
vout2 = math_max_f32(vout2, vmin);
vout3 = math_max_f32(vout3, vmin);
output[0] = vout0;
output[1] = vout1;
output[2] = vout2;
output[3] = vout3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 4;
}
output_decrement += 2 * sizeof(float);
if (mc & (2 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc0x1 = *w++;
float vacc0x2 = *w++;
float vacc0x3 = *w++;
float vacc1x0 = vacc0x0;
float vacc1x1 = vacc0x1;
float vacc1x2 = vacc0x2;
float vacc1x3 = vacc0x3;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
const float vw1 = *w++;
const float vw2 = *w++;
const float vw3 = *w++;
vacc0x0 += vi0 * vw0;
vacc1x0 += vi1 * vw0;
vacc0x1 += vi0 * vw1;
vacc1x1 += vi1 * vw1;
vacc0x2 += vi0 * vw2;
vacc1x2 += vi1 * vw2;
vacc0x3 += vi0 * vw3;
vacc1x3 += vi1 * vw3;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout1x0 = math_min_f32(vacc1x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout1x1 = math_min_f32(vacc1x1, vmax);
float vout0x2 = math_min_f32(vacc0x2, vmax);
float vout1x2 = math_min_f32(vacc1x2, vmax);
float vout0x3 = math_min_f32(vacc0x3, vmax);
float vout1x3 = math_min_f32(vacc1x3, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout1x0 = math_max_f32(vout1x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout1x1 = math_max_f32(vout1x1, vmin);
vout0x2 = math_max_f32(vout0x2, vmin);
vout1x2 = math_max_f32(vout1x2, vmin);
vout0x3 = math_max_f32(vout0x3, vmin);
vout1x3 = math_max_f32(vout1x3, vmin);
output[0] = vout0x0;
output[1] = vout1x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output[1] = vout1x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x2;
output[1] = vout1x2;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x3;
output[1] = vout1x3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 4;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
float vacc1 = vacc0;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
const float vi1 = input[1];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
vacc1 += vi1 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
float vout1 = math_min_f32(vacc1, vmax);
vout0 = math_max_f32(vout0, vmin);
vout1 = math_max_f32(vout1, vmin);
output[0] = vout0;
output[1] = vout1;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 2;
}
output_decrement += 1 * sizeof(float);
if (mc & (1 * sizeof(float))) {
const float* w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
size_t n = nc;
while (n >= 4) {
uint32_t nnz = *nnzmap++;
float vacc0x0 = *w++;
float vacc0x1 = *w++;
float vacc0x2 = *w++;
float vacc0x3 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw0 = *w++;
const float vw1 = *w++;
const float vw2 = *w++;
const float vw3 = *w++;
vacc0x0 += vi0 * vw0;
vacc0x1 += vi0 * vw1;
vacc0x2 += vi0 * vw2;
vacc0x3 += vi0 * vw3;
} while (--nnz != 0);
}
float vout0x0 = math_min_f32(vacc0x0, vmax);
float vout0x1 = math_min_f32(vacc0x1, vmax);
float vout0x2 = math_min_f32(vacc0x2, vmax);
float vout0x3 = math_min_f32(vacc0x3, vmax);
vout0x0 = math_max_f32(vout0x0, vmin);
vout0x1 = math_max_f32(vout0x1, vmin);
vout0x2 = math_max_f32(vout0x2, vmin);
vout0x3 = math_max_f32(vout0x3, vmin);
output[0] = vout0x0;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x1;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x2;
output = (float*restrict) ((uintptr_t) output + output_stride);
output[0] = vout0x3;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 4;
}
if XNN_UNLIKELY(n != 0) {
do {
uint32_t nnz = *nnzmap++;
float vacc0 = *w++;
if XNN_LIKELY(nnz != 0) {
do {
const intptr_t diff = *dmap++;
const float vi0 = input[0];
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
const float vw = *w++;
vacc0 += vi0 * vw;
} while (--nnz != 0);
}
float vout0 = math_min_f32(vacc0, vmax);
vout0 = math_max_f32(vout0, vmin);
output[0] = vout0;
output = (float*restrict) ((uintptr_t) output + output_stride);
n -= 1;
} while (n != 0);
}
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += 1;
}
}
}
| 22,444 | 35.555375 | 84 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__avx_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
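  // The main loop handles 16 elements per iteration using two 8-float registers; the loop below handles a single register of 8.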
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vacc0 = _mm256_loadu_ps(input_a);
__m256 vacc1 = _mm256_loadu_ps(input_a + 8);
input_a += 16;
vacc0 = _mm256_add_ps(vacc0, _mm256_loadu_ps(input_b));
vacc1 = _mm256_add_ps(vacc1, _mm256_loadu_ps(input_b + 8));
input_b += 16;
vacc0 = _mm256_max_ps(voutput_min, vacc0);
vacc1 = _mm256_max_ps(voutput_min, vacc1);
vacc0 = _mm256_min_ps(voutput_max, vacc0);
vacc1 = _mm256_min_ps(voutput_max, vacc1);
_mm256_storeu_ps(output, vacc0);
_mm256_storeu_ps(output + 8, vacc1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vacc = _mm256_loadu_ps(input_a);
input_a += 8;
vacc = _mm256_add_ps(vacc, _mm256_loadu_ps(input_b));
input_b += 8;
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
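    // Build a per-lane mask for the 1..7 remaining elements by offsetting into the shared mask table.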
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vacc = _mm256_maskload_ps(input_a, vmask);
const __m256 vb = _mm256_maskload_ps(input_b, vmask);
vacc = _mm256_add_ps(vacc, vb);
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,743 | 28.505376 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__avx_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vacc = _mm256_loadu_ps(input_a);
input_a += 8;
vacc = _mm256_add_ps(vacc, _mm256_loadu_ps(input_b));
input_b += 8;
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vacc = _mm256_maskload_ps(input_a, vmask);
const __m256 vb = _mm256_maskload_ps(input_b, vmask);
vacc = _mm256_add_ps(vacc, vb);
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,124 | 28.109589 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__avx512f_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc = _mm512_loadu_ps(input_a);
input_a += 16;
vacc = _mm512_add_ps(vacc, _mm512_loadu_ps(input_b));
input_b += 16;
vacc = _mm512_max_ps(voutput_min, vacc);
vacc = _mm512_min_ps(voutput_max, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
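    // batch now counts elements; set one mask bit per remaining element.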
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
vacc = _mm512_maskz_add_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 1,906 | 29.758065 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-avx512f-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__avx512f_x32(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vacc0 = _mm512_loadu_ps(input_a);
__m512 vacc1 = _mm512_loadu_ps(input_a + 16);
input_a += 32;
vacc0 = _mm512_add_ps(vacc0, _mm512_loadu_ps(input_b));
vacc1 = _mm512_add_ps(vacc1, _mm512_loadu_ps(input_b + 16));
input_b += 32;
vacc0 = _mm512_max_ps(voutput_min, vacc0);
vacc1 = _mm512_max_ps(voutput_min, vacc1);
vacc0 = _mm512_min_ps(voutput_max, vacc0);
vacc1 = _mm512_min_ps(voutput_max, vacc1);
_mm512_storeu_ps(output, vacc0);
_mm512_storeu_ps(output + 16, vacc1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc = _mm512_loadu_ps(input_a);
input_a += 16;
vacc = _mm512_add_ps(vacc, _mm512_loadu_ps(input_b));
input_b += 16;
vacc = _mm512_max_ps(voutput_min, vacc);
vacc = _mm512_min_ps(voutput_max, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
vacc = _mm512_maskz_add_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 2,527 | 29.829268 | 105 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-neon-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__neon_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t va = vld1q_f32(input_a); input_a += 4;
const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
float32x4_t vacc = vaddq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
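    // 1-3 remaining elements: the kernel is declared XNN_OOB_READS, so it may
    // load a full 4-element vector past the end of the inputs and store only
    // the valid lanes below.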
const float32x4_t va = vld1q_f32(input_a);
const float32x4_t vb = vld1q_f32(input_b);
float32x4_t vacc = vaddq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 1,766 | 27.5 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-neon-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__neon_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(input_a); input_a += 4;
const float32x4_t vb0 = vld1q_f32(input_b); input_b += 4;
const float32x4_t va1 = vld1q_f32(input_a); input_a += 4;
const float32x4_t vb1 = vld1q_f32(input_b); input_b += 4;
float32x4_t vacc0 = vaddq_f32(va0, vb0);
float32x4_t vacc1 = vaddq_f32(va1, vb1);
vacc0 = vmaxq_f32(vacc0, voutput_min);
vacc1 = vmaxq_f32(vacc1, voutput_min);
vacc0 = vminq_f32(vacc0, voutput_max);
vacc1 = vminq_f32(vacc1, voutput_max);
vst1q_f32(output, vacc0); output += 4;
vst1q_f32(output, vacc1); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t va = vld1q_f32(input_a); input_a += 4;
const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
float32x4_t vacc = vaddq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t va = vld1q_f32(input_a);
const float32x4_t vb = vld1q_f32(input_b);
float32x4_t vacc = vaddq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 2,438 | 29.111111 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
}
}
| 1,103 | 25.285714 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
input_b += 2;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
const float vb = *input_b;
float vacc = va + vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output = vacc;
}
}
| 1,660 | 24.166667 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
input_b += 4;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc2 = math_max_f32(vacc2, voutput_min);
vacc3 = math_max_f32(vacc3, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
vacc2 = math_min_f32(vacc2, voutput_max);
vacc3 = math_min_f32(vacc3, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,131 | 25.65 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-scalar-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
const float vb4 = input_b[4];
const float vb5 = input_b[5];
const float vb6 = input_b[6];
const float vb7 = input_b[7];
input_b += 8;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
float vacc4 = va4 + vb4;
float vacc5 = va5 + vb5;
float vacc6 = va6 + vb6;
float vacc7 = va7 + vb7;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc2 = math_max_f32(vacc2, voutput_min);
vacc3 = math_max_f32(vacc3, voutput_min);
vacc4 = math_max_f32(vacc4, voutput_min);
vacc5 = math_max_f32(vacc5, voutput_min);
vacc6 = math_max_f32(vacc6, voutput_min);
vacc7 = math_max_f32(vacc7, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
vacc2 = math_min_f32(vacc2, voutput_max);
vacc3 = math_min_f32(vacc3, voutput_max);
vacc4 = math_min_f32(vacc4, voutput_max);
vacc5 = math_min_f32(vacc5, voutput_max);
vacc6 = math_min_f32(vacc6, voutput_max);
vacc7 = math_min_f32(vacc7, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,979 | 27.653846 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-sse-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__sse_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 va = _mm_loadu_ps(input_a);
input_a += 4;
const __m128 vb = _mm_loadu_ps(input_b);
input_b += 4;
__m128 vacc = _mm_add_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
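    // 1-3 remaining elements: full-width loads are permitted (XNN_OOB_READS);
    // only the valid lanes are stored, in pieces of 2 and 1.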
const __m128 va = _mm_loadu_ps(input_a);
const __m128 vb = _mm_loadu_ps(input_b);
__m128 vacc = _mm_add_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,756 | 25.223881 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-sse-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__sse_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(input_a);
const __m128 va1 = _mm_loadu_ps(input_a + 4);
input_a += 8;
const __m128 vb0 = _mm_loadu_ps(input_b);
const __m128 vb1 = _mm_loadu_ps(input_b + 4);
input_b += 8;
__m128 vacc0 = _mm_add_ps(va0, vb0);
__m128 vacc1 = _mm_add_ps(va1, vb1);
vacc0 = _mm_max_ps(vacc0, voutput_min);
vacc1 = _mm_max_ps(vacc1, voutput_min);
vacc0 = _mm_min_ps(vacc0, voutput_max);
vacc1 = _mm_min_ps(vacc1, voutput_max);
_mm_storeu_ps(output, vacc0);
_mm_storeu_ps(output + 4, vacc1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 va = _mm_loadu_ps(input_a);
input_a += 4;
const __m128 vb = _mm_loadu_ps(input_b);
input_b += 4;
__m128 vacc = _mm_add_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 va = _mm_loadu_ps(input_a);
const __m128 vb = _mm_loadu_ps(input_b);
__m128 vacc = _mm_add_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 2,408 | 25.766667 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasm-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasm_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
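  // __builtin_wasm_min_f32/__builtin_wasm_max_f32 lower to the single
  // WebAssembly f32.min/f32.max instructions.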
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
}
}
| 1,121 | 25.714286 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasm-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasm_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
input_b += 2;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
const float vb = *input_b;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output = vacc;
}
}
| 1,718 | 25.045455 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasm-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
input_b += 4;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,229 | 26.875 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasm-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
const float vb4 = input_b[4];
const float vb5 = input_b[5];
const float vb6 = input_b[6];
const float vb7 = input_b[7];
input_b += 8;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
float vacc4 = va4 + vb4;
float vacc5 = va5 + vb5;
float vacc6 = va6 + vb6;
float vacc7 = va7 + vb7;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
vacc4 = __builtin_wasm_max_f32(vacc4, voutput_min);
vacc5 = __builtin_wasm_max_f32(vacc5, voutput_min);
vacc6 = __builtin_wasm_max_f32(vacc6, voutput_min);
vacc7 = __builtin_wasm_max_f32(vacc7, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
vacc4 = __builtin_wasm_min_f32(vacc4, voutput_max);
vacc5 = __builtin_wasm_min_f32(vacc5, voutput_max);
vacc6 = __builtin_wasm_min_f32(vacc6, voutput_max);
vacc7 = __builtin_wasm_min_f32(vacc7, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 3,157 | 29.365385 | 75 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasmsimd-arm-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasmsimd_arm_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
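  // The "arm" variant clamps with wasm_f32x4_min/max, which map directly onto
  // ARM FMIN/FMAX; the "x86" variant below uses pmin/pmax instead.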
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
const v128_t vb2 = wasm_v128_load(input_b + 8);
const v128_t vb3 = wasm_v128_load(input_b + 12);
input_b += 16;
v128_t vacc0 = wasm_f32x4_add(va0, vb0);
v128_t vacc1 = wasm_f32x4_add(va1, vb1);
v128_t vacc2 = wasm_f32x4_add(va2, vb2);
v128_t vacc3 = wasm_f32x4_add(va3, vb3);
vacc0 = wasm_f32x4_max(vacc0, voutput_min);
vacc1 = wasm_f32x4_max(vacc1, voutput_min);
vacc2 = wasm_f32x4_max(vacc2, voutput_min);
vacc3 = wasm_f32x4_max(vacc3, voutput_min);
vacc0 = wasm_f32x4_min(vacc0, voutput_max);
vacc1 = wasm_f32x4_min(vacc1, voutput_max);
vacc2 = wasm_f32x4_min(vacc2, voutput_max);
vacc3 = wasm_f32x4_min(vacc3, voutput_max);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
wasm_v128_store(output + 8, vacc2);
wasm_v128_store(output + 12, vacc3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 3,094 | 29.048544 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasmsimd-arm-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasmsimd_arm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,827 | 25.882353 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasmsimd-arm-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasmsimd_arm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
input_b += 8;
v128_t vacc0 = wasm_f32x4_add(va0, vb0);
v128_t vacc1 = wasm_f32x4_add(va1, vb1);
vacc0 = wasm_f32x4_max(vacc0, voutput_min);
vacc1 = wasm_f32x4_max(vacc1, voutput_min);
vacc0 = wasm_f32x4_min(vacc0, voutput_max);
vacc1 = wasm_f32x4_min(vacc1, voutput_max);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_max(vacc, voutput_min);
vacc = wasm_f32x4_min(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,515 | 26.648352 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasmsimd-x86-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasmsimd_x86_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
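  // The "x86" variant clamps with wasm_f32x4_pmin/pmax, whose semantics match
  // the single-instruction x86 MINPS/MAXPS.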
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
const v128_t vb2 = wasm_v128_load(input_b + 8);
const v128_t vb3 = wasm_v128_load(input_b + 12);
input_b += 16;
v128_t vacc0 = wasm_f32x4_add(va0, vb0);
v128_t vacc1 = wasm_f32x4_add(va1, vb1);
v128_t vacc2 = wasm_f32x4_add(va2, vb2);
v128_t vacc3 = wasm_f32x4_add(va3, vb3);
vacc0 = wasm_f32x4_pmax(voutput_min, vacc0);
vacc1 = wasm_f32x4_pmax(voutput_min, vacc1);
vacc2 = wasm_f32x4_pmax(voutput_min, vacc2);
vacc3 = wasm_f32x4_pmax(voutput_min, vacc3);
vacc0 = wasm_f32x4_pmin(voutput_max, vacc0);
vacc1 = wasm_f32x4_pmin(voutput_max, vacc1);
vacc2 = wasm_f32x4_pmin(voutput_max, vacc2);
vacc3 = wasm_f32x4_pmin(voutput_max, vacc3);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
wasm_v128_store(output + 8, vacc2);
wasm_v128_store(output + 12, vacc3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 3,106 | 29.165049 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasmsimd-x86-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasmsimd_x86_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,831 | 25.941176 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-minmax-wasmsimd-x86-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_minmax_ukernel__wasmsimd_x86_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
input_b += 8;
v128_t vacc0 = wasm_f32x4_add(va0, vb0);
v128_t vacc1 = wasm_f32x4_add(va1, vb1);
vacc0 = wasm_f32x4_pmax(voutput_min, vacc0);
vacc1 = wasm_f32x4_pmax(voutput_min, vacc1);
vacc0 = wasm_f32x4_pmin(voutput_max, vacc0);
vacc1 = wasm_f32x4_pmin(voutput_max, vacc1);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_f32x4_pmax(voutput_min, vacc);
vacc = wasm_f32x4_pmin(voutput_max, vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,523 | 26.736264 | 89 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
}
}
| 952 | 23.435897 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
input_b += 2;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
const float vb = *input_b;
float vacc = va + vb;
vacc = math_max_f32(vacc, 0.0f);
*output = vacc;
}
}
| 1,402 | 22.383333 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
input_b += 4;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
vacc2 = math_max_f32(vacc2, 0.0f);
vacc3 = math_max_f32(vacc3, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,765 | 23.527778 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-scalar-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
const float vb4 = input_b[4];
const float vb5 = input_b[5];
const float vb6 = input_b[6];
const float vb7 = input_b[7];
input_b += 8;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
float vacc4 = va4 + vb4;
float vacc5 = va5 + vb5;
float vacc6 = va6 + vb6;
float vacc7 = va7 + vb7;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
vacc2 = math_max_f32(vacc2, 0.0f);
vacc3 = math_max_f32(vacc3, 0.0f);
vacc4 = math_max_f32(vacc4, 0.0f);
vacc5 = math_max_f32(vacc5, 0.0f);
vacc6 = math_max_f32(vacc6, 0.0f);
vacc7 = math_max_f32(vacc7, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,401 | 25.108696 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-wasm-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__wasm_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
}
}
| 960 | 23.641026 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-wasm-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__wasm_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
input_b += 2;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
const float vb = *input_b;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output = vacc;
}
}
| 1,430 | 22.85 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-wasm-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__wasm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
input_b += 4;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,813 | 24.194444 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-wasm-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__wasm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
const float vb4 = input_b[4];
const float vb5 = input_b[5];
const float vb6 = input_b[6];
const float vb7 = input_b[7];
input_b += 8;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
float vacc4 = va4 + vb4;
float vacc5 = va5 + vb5;
float vacc6 = va6 + vb6;
float vacc7 = va7 + vb7;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
vacc4 = __builtin_wasm_max_f32(vacc4, 0.0f);
vacc5 = __builtin_wasm_max_f32(vacc5, 0.0f);
vacc6 = __builtin_wasm_max_f32(vacc6, 0.0f);
vacc7 = __builtin_wasm_max_f32(vacc7, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,489 | 26.065217 | 73 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__wasmsimd_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vzero = wasm_i32x4_const_splat(0);
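  // ReLU as a signed-integer max with zero: non-negative floats keep their bit
  // pattern, while any float with the sign bit set compares as a negative
  // integer and is replaced by +0.0f.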
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
const v128_t vb2 = wasm_v128_load(input_b + 8);
const v128_t vb3 = wasm_v128_load(input_b + 12);
input_b += 16;
v128_t vacc0 = wasm_f32x4_add(va0, vb0);
v128_t vacc1 = wasm_f32x4_add(va1, vb1);
v128_t vacc2 = wasm_f32x4_add(va2, vb2);
v128_t vacc3 = wasm_f32x4_add(va3, vb3);
vacc0 = wasm_i32x4_max(vacc0, vzero);
vacc1 = wasm_i32x4_max(vacc1, vzero);
vacc2 = wasm_i32x4_max(vacc2, vzero);
vacc3 = wasm_i32x4_max(vacc3, vzero);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
wasm_v128_store(output + 8, vacc2);
wasm_v128_store(output + 12, vacc3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_i32x4_max(vacc, vzero);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_i32x4_max(vacc, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,665 | 27.063158 | 87 | c |
| XNNPACK | XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-wasmsimd-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__wasmsimd_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vzero = wasm_i32x4_const_splat(0);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_i32x4_max(vacc, vzero);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_i32x4_max(vacc, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,615 | 23.861538 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-relu-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_relu_ukernel__wasmsimd_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vzero = wasm_i32x4_const_splat(0);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
input_b += 8;
v128_t vacc0 = wasm_f32x4_add(va0, vb0);
v128_t vacc1 = wasm_f32x4_add(va1, vb1);
vacc0 = wasm_i32x4_max(vacc0, vzero);
vacc1 = wasm_i32x4_max(vacc1, vzero);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_i32x4_max(vacc, vzero);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
vacc = wasm_i32x4_max(vacc, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,194 | 24.823529 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
*output++ = vacc;
}
}
| 913 | 23.052632 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
input_b += 2;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
const float vb = *input_b;
float vacc = va + vb;
*output = vacc;
}
}
| 1,285 | 21.561404 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
input_b += 4;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,568 | 22.41791 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
const float vb0 = input_b[0];
const float vb1 = input_b[1];
const float vb2 = input_b[2];
const float vb3 = input_b[3];
const float vb4 = input_b[4];
const float vb5 = input_b[5];
const float vb6 = input_b[6];
const float vb7 = input_b[7];
input_b += 8;
float vacc0 = va0 + vb0;
float vacc1 = va1 + vb1;
float vacc2 = va2 + vb2;
float vacc3 = va3 + vb3;
float vacc4 = va4 + vb4;
float vacc5 = va5 + vb5;
float vacc6 = va6 + vb6;
float vacc7 = va7 + vb7;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
const float vb = *input_b++;
float vacc = va + vb;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,048 | 23.686747 | 76 |
c
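A note on the calling convention shared by the kernels above: batch counts bytes, not elements, which is why loop bounds and tail tests are multiples of sizeof(float) (batch & (2 * sizeof(float)) selects a two-element tail, for instance). The minimal sketch below mirrors that contract with a plain reference loop; reference_vadd is a made-up name, not an XNNPACK entry point.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

// Reference elementwise addition written against the same contract as the
// kernels above: `batch` is a byte count that must be a non-zero multiple
// of sizeof(float).
static void reference_vadd(size_t batch, const float* input_a,
                           const float* input_b, float* output) {
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  do {
    *output++ = *input_a++ + *input_b++;
    batch -= sizeof(float);
  } while (batch != 0);
}

int main(void) {
  const float a[5] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
  const float b[5] = {10.0f, 20.0f, 30.0f, 40.0f, 50.0f};
  float y[5];
  // Five elements -> pass 5 * sizeof(float) bytes, exactly as a caller of
  // one of the f32 vadd ukernels would.
  reference_vadd(5 * sizeof(float), a, b, y);
  for (size_t i = 0; i < 5; i++) {
    printf("%g\n", y[i]);
  }
  return 0;
}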
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_ukernel__wasmsimd_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
const v128_t vb2 = wasm_v128_load(input_b + 8);
const v128_t vb3 = wasm_v128_load(input_b + 12);
input_b += 16;
v128_t vacc0 = wasm_f32x4_add(va0, vb0);
v128_t vacc1 = wasm_f32x4_add(va1, vb1);
v128_t vacc2 = wasm_f32x4_add(va2, vb2);
v128_t vacc3 = wasm_f32x4_add(va3, vb3);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
wasm_v128_store(output + 8, vacc2);
wasm_v128_store(output + 12, vacc3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,365 | 25.886364 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_ukernel__wasmsimd_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,483 | 22.935484 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vadd-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vadd_ukernel__wasmsimd_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
const v128_t vb0 = wasm_v128_load(input_b);
const v128_t vb1 = wasm_v128_load(input_b + 4);
input_b += 8;
v128_t vacc0 = wasm_f32x4_add(va0, vb0);
v128_t vacc1 = wasm_f32x4_add(va1, vb1);
wasm_v128_store(output, vacc0);
wasm_v128_store(output + 4, vacc1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
const v128_t vb = wasm_v128_load(input_b);
input_b += 4;
v128_t vacc = wasm_f32x4_add(va, vb);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
const v128_t vb = wasm_v128_load(input_b);
v128_t vacc = wasm_f32x4_add(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,978 | 23.7375 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__avx_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_broadcast_ss(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vacc0 = _mm256_loadu_ps(input_a);
__m256 vacc1 = _mm256_loadu_ps(input_a + 8);
input_a += 16;
vacc0 = _mm256_add_ps(vacc0, vb);
vacc1 = _mm256_add_ps(vacc1, vb);
vacc0 = _mm256_max_ps(voutput_min, vacc0);
vacc1 = _mm256_max_ps(voutput_min, vacc1);
vacc0 = _mm256_min_ps(voutput_max, vacc0);
vacc1 = _mm256_min_ps(voutput_max, vacc1);
_mm256_storeu_ps(output, vacc0);
_mm256_storeu_ps(output + 8, vacc1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vacc = _mm256_loadu_ps(input_a);
input_a += 8;
vacc = _mm256_add_ps(vacc, vb);
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vacc = _mm256_maskload_ps(input_a, vmask);
vacc = _mm256_add_ps(vacc, vb);
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,630 | 27.912088 | 112 |
c
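A note on the AVX remainder path above: the _mm256_maskload_ps mask is produced by loading eight int32 lanes starting at &params->avx.mask_table[7] minus the remaining byte count. Assuming the table holds seven -1 entries followed by seven 0 entries, which is the layout XNNPACK appears to use, backing the load window up by one float per leftover element leaves exactly that many all-ones lanes. The portable sketch below, not part of XNNPACK, reproduces only the addressing arithmetic so it can be checked without AVX.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Seven -1 entries followed by seven 0 entries: the layout assumed here for
// params->avx.mask_table in the kernels above.
static const int32_t mask_table[14] = {
  -1, -1, -1, -1, -1, -1, -1,
   0,  0,  0,  0,  0,  0,  0,
};

int main(void) {
  // Remainders of 1..7 floats, expressed in bytes as in the kernel.
  for (size_t elements = 1; elements <= 7; elements++) {
    const size_t batch = elements * sizeof(float);
    // Same pointer arithmetic as the kernel: back up `batch` bytes from
    // the eighth table entry and read eight consecutive lanes.
    const int32_t* mask = (const int32_t*) ((uintptr_t) &mask_table[7] - batch);
    size_t active = 0;
    for (size_t lane = 0; lane < 8; lane++) {
      if (mask[lane] != 0) {
        active++;
      }
    }
    printf("batch=%zu bytes -> %zu active lanes\n", batch, active);
    assert(active == elements);
  }
  return 0;
}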
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__avx_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
const __m256 vb = _mm256_broadcast_ss(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vacc = _mm256_loadu_ps(input_a);
input_a += 8;
vacc = _mm256_add_ps(vacc, vb);
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
__m256 vacc = _mm256_maskload_ps(input_a, vmask);
vacc = _mm256_add_ps(vacc, vb);
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,078 | 27.875 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__avx512f_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
const __m512 vb = _mm512_set1_ps(*input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc0 = _mm512_loadu_ps(input_a);
input_a += 16;
vacc0 = _mm512_add_ps(vacc0, vb);
vacc0 = _mm512_max_ps(voutput_min, vacc0);
vacc0 = _mm512_min_ps(voutput_max, vacc0);
_mm512_storeu_ps(output, vacc0);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
vacc = _mm512_maskz_add_ps(vmask, vacc, vb);
vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 1,888 | 28.515625 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__avx512f_x32(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
const __m512 vb = _mm512_set1_ps(*input_b);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vacc0 = _mm512_loadu_ps(input_a);
__m512 vacc1 = _mm512_loadu_ps(input_a + 16);
input_a += 32;
vacc0 = _mm512_add_ps(vacc0, vb);
vacc1 = _mm512_add_ps(vacc1, vb);
vacc0 = _mm512_max_ps(voutput_min, vacc0);
vacc1 = _mm512_max_ps(voutput_min, vacc1);
vacc0 = _mm512_min_ps(voutput_max, vacc0);
vacc1 = _mm512_min_ps(voutput_max, vacc1);
_mm512_storeu_ps(output, vacc0);
_mm512_storeu_ps(output + 16, vacc1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc = _mm512_loadu_ps(input_a);
input_a += 16;
vacc = _mm512_add_ps(vacc, vb);
vacc = _mm512_max_ps(voutput_min, vacc);
vacc = _mm512_min_ps(voutput_max, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
vacc = _mm512_maskz_add_ps(vmask, vacc, vb);
vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 2,430 | 29.3875 | 105 |
c
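A note on the AVX-512 remainder path above: the leftover element count becomes a __mmask16 by shifting batch from bytes back to elements and computing (UINT32_C(1) << batch) - 1, i.e. a mask with the low batch bits set. The scalar sketch below, not part of XNNPACK, repeats that arithmetic for every possible remainder of 1 to 15 floats.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LOG2_SIZEOF_FLOAT 2  // sizeof(float) == 4 == 1 << 2

int main(void) {
  for (size_t elements = 1; elements <= 15; elements++) {
    size_t batch = elements * sizeof(float);  // byte count, as passed to the kernel
    batch >>= LOG2_SIZEOF_FLOAT;              // back to an element count
    // Low `batch` bits set: lane i participates iff i < batch.
    const uint16_t mask = (uint16_t) ((UINT32_C(1) << batch) - UINT32_C(1));
    printf("%2zu remaining floats -> mask 0x%04x\n", elements, mask);
    assert(mask == (1u << elements) - 1u);
  }
  return 0;
}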
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__neon_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vb = vld1q_dup_f32(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t va = vld1q_f32(input_a); input_a += 4;
float32x4_t vacc = vaddq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t va = vld1q_f32(input_a);
float32x4_t vacc = vaddq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 1,710 | 27.04918 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__neon_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
  const float32x4_t voutput_min = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t voutput_max = vld1q_dup_f32(&params->scalar.max);
const float32x4_t vb = vld1q_dup_f32(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    float32x4_t vacc0 = vld1q_f32(input_a); input_a += 4;
    float32x4_t vacc1 = vld1q_f32(input_a); input_a += 4;
    vacc0 = vaddq_f32(vacc0, vb);
    vacc1 = vaddq_f32(vacc1, vb);
    vacc0 = vmaxq_f32(vacc0, voutput_min);
    vacc1 = vmaxq_f32(vacc1, voutput_min);
    vacc0 = vminq_f32(vacc0, voutput_max);
    vacc1 = vminq_f32(vacc1, voutput_max);
    vst1q_f32(output, vacc0); output += 4;
    vst1q_f32(output, vacc1); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t va = vld1q_f32(input_a); input_a += 4;
float32x4_t vacc = vaddq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t va = vld1q_f32(input_a);
float32x4_t vacc = vaddq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 2,228 | 27.576923 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va + vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
}
}
| 1,101 | 25.238095 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va + vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output = vacc;
}
}
| 1,571 | 24.354839 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc2 = math_max_f32(vacc2, voutput_min);
vacc3 = math_max_f32(vacc3, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
vacc2 = math_min_f32(vacc2, voutput_max);
vacc3 = math_min_f32(vacc3, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,968 | 25.608108 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
float vacc4 = va4 + vb;
float vacc5 = va5 + vb;
float vacc6 = va6 + vb;
float vacc7 = va7 + vb;
vacc0 = math_max_f32(vacc0, voutput_min);
vacc1 = math_max_f32(vacc1, voutput_min);
vacc2 = math_max_f32(vacc2, voutput_min);
vacc3 = math_max_f32(vacc3, voutput_min);
vacc4 = math_max_f32(vacc4, voutput_min);
vacc5 = math_max_f32(vacc5, voutput_min);
vacc6 = math_max_f32(vacc6, voutput_min);
vacc7 = math_max_f32(vacc7, voutput_min);
vacc0 = math_min_f32(vacc0, voutput_max);
vacc1 = math_min_f32(vacc1, voutput_max);
vacc2 = math_min_f32(vacc2, voutput_max);
vacc3 = math_min_f32(vacc3, voutput_max);
vacc4 = math_min_f32(vacc4, voutput_max);
vacc5 = math_min_f32(vacc5, voutput_max);
vacc6 = math_min_f32(vacc6, voutput_max);
vacc7 = math_min_f32(vacc7, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
vacc = math_max_f32(vacc, voutput_min);
vacc = math_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,676 | 27.478723 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__sse_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
const __m128 vb = _mm_load1_ps(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 va = _mm_loadu_ps(input_a);
input_a += 4;
__m128 vacc = _mm_add_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 va = _mm_loadu_ps(input_a);
__m128 vacc = _mm_add_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,691 | 25.857143 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-sse-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__sse_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128 voutput_min = _mm_load_ps(params->sse.min);
const __m128 voutput_max = _mm_load_ps(params->sse.max);
const __m128 vb = _mm_load1_ps(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 va0 = _mm_loadu_ps(input_a);
const __m128 va1 = _mm_loadu_ps(input_a + 4);
input_a += 8;
__m128 vacc0 = _mm_add_ps(va0, vb);
__m128 vacc1 = _mm_add_ps(va1, vb);
vacc0 = _mm_max_ps(vacc0, voutput_min);
vacc1 = _mm_max_ps(vacc1, voutput_min);
vacc0 = _mm_min_ps(vacc0, voutput_max);
vacc1 = _mm_min_ps(vacc1, voutput_max);
_mm_storeu_ps(output, vacc0);
_mm_storeu_ps(output + 4, vacc1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 va = _mm_loadu_ps(input_a);
input_a += 4;
__m128 vacc = _mm_add_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 va = _mm_loadu_ps(input_a);
__m128 vacc = _mm_add_ps(va, vb);
vacc = _mm_max_ps(vacc, voutput_min);
vacc = _mm_min_ps(vacc, voutput_max);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 2,226 | 26.158537 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasm_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
}
}
| 1,119 | 25.666667 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasm_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output = vacc;
}
}
| 1,629 | 25.290323 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,066 | 26.932432 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float voutput_min = params->scalar.min;
const float voutput_max = params->scalar.max;
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
float vacc4 = va4 + vb;
float vacc5 = va5 + vb;
float vacc6 = va6 + vb;
float vacc7 = va7 + vb;
vacc0 = __builtin_wasm_max_f32(vacc0, voutput_min);
vacc1 = __builtin_wasm_max_f32(vacc1, voutput_min);
vacc2 = __builtin_wasm_max_f32(vacc2, voutput_min);
vacc3 = __builtin_wasm_max_f32(vacc3, voutput_min);
vacc4 = __builtin_wasm_max_f32(vacc4, voutput_min);
vacc5 = __builtin_wasm_max_f32(vacc5, voutput_min);
vacc6 = __builtin_wasm_max_f32(vacc6, voutput_min);
vacc7 = __builtin_wasm_max_f32(vacc7, voutput_min);
vacc0 = __builtin_wasm_min_f32(vacc0, voutput_max);
vacc1 = __builtin_wasm_min_f32(vacc1, voutput_max);
vacc2 = __builtin_wasm_min_f32(vacc2, voutput_max);
vacc3 = __builtin_wasm_min_f32(vacc3, voutput_max);
vacc4 = __builtin_wasm_min_f32(vacc4, voutput_max);
vacc5 = __builtin_wasm_min_f32(vacc5, voutput_max);
vacc6 = __builtin_wasm_min_f32(vacc6, voutput_max);
vacc7 = __builtin_wasm_min_f32(vacc7, voutput_max);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, voutput_min);
vacc = __builtin_wasm_min_f32(vacc, voutput_max);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,854 | 29.37234 | 75 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasmsimd-arm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasmsimd_arm_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
v128_t vy0 = wasm_f32x4_add(va0, vb);
v128_t vy1 = wasm_f32x4_add(va1, vb);
v128_t vy2 = wasm_f32x4_add(va2, vb);
v128_t vy3 = wasm_f32x4_add(va3, vb);
vy0 = wasm_f32x4_max(vy0, voutput_min);
vy1 = wasm_f32x4_max(vy1, voutput_min);
vy2 = wasm_f32x4_max(vy2, voutput_min);
vy3 = wasm_f32x4_max(vy3, voutput_min);
vy0 = wasm_f32x4_min(vy0, voutput_max);
vy1 = wasm_f32x4_min(vy1, voutput_max);
vy2 = wasm_f32x4_min(vy2, voutput_max);
vy3 = wasm_f32x4_min(vy3, voutput_max);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
wasm_v128_store(output + 8, vy2);
wasm_v128_store(output + 12, vy3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,727 | 28.021277 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasmsimd-arm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasmsimd_arm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,737 | 25.738462 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasmsimd-arm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasmsimd_arm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
v128_t vy0 = wasm_f32x4_add(va0, vb);
v128_t vy1 = wasm_f32x4_add(va1, vb);
vy0 = wasm_f32x4_max(vy0, voutput_min);
vy1 = wasm_f32x4_max(vy1, voutput_min);
vy0 = wasm_f32x4_min(vy0, voutput_max);
vy1 = wasm_f32x4_min(vy1, voutput_max);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_max(vy, voutput_min);
vy = wasm_f32x4_min(vy, voutput_max);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,280 | 26.154762 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasmsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasmsimd_x86_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
v128_t vy0 = wasm_f32x4_add(va0, vb);
v128_t vy1 = wasm_f32x4_add(va1, vb);
v128_t vy2 = wasm_f32x4_add(va2, vb);
v128_t vy3 = wasm_f32x4_add(va3, vb);
vy0 = wasm_f32x4_pmax(voutput_min, vy0);
vy1 = wasm_f32x4_pmax(voutput_min, vy1);
vy2 = wasm_f32x4_pmax(voutput_min, vy2);
vy3 = wasm_f32x4_pmax(voutput_min, vy3);
vy0 = wasm_f32x4_pmin(voutput_max, vy0);
vy1 = wasm_f32x4_pmin(voutput_max, vy1);
vy2 = wasm_f32x4_pmin(voutput_max, vy2);
vy3 = wasm_f32x4_pmin(voutput_max, vy3);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
wasm_v128_store(output + 8, vy2);
wasm_v128_store(output + 12, vy3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,739 | 28.148936 | 89 |
c
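A note on the _arm and _x86 variants above: they differ only in using wasm_f32x4_max/min versus wasm_f32x4_pmax/pmin. The pseudo forms are plain compare-and-selects, pmax(a, b) = a < b ? b : a, which lower to a single maxps/minps on x86, whereas the symmetric NaN-propagating f32x4.max/min is cheap on ARM but needs extra work on x86; the two only disagree when NaN or signed zeros are involved. The scalar sketch below, not part of XNNPACK or the WASM intrinsics API, models both flavors.

#include <math.h>
#include <stdio.h>

// Pseudo-maximum as used by the *_x86 kernels: a plain compare-and-select
// that lowers to a single maxps instruction on x86.
static float pmax_f32(float a, float b) {
  return a < b ? b : a;
}

// NaN-propagating maximum modelling wasm_f32x4_max, which the *_arm
// kernels rely on.
static float nan_propagating_max_f32(float a, float b) {
  if (isnan(a) || isnan(b)) {
    return NAN;
  }
  return a < b ? b : a;
}

int main(void) {
  // With ordinary inputs the two flavors agree.
  printf("pmax(1, 2)   = %g, max(1, 2)   = %g\n",
         pmax_f32(1.0f, 2.0f), nan_propagating_max_f32(1.0f, 2.0f));
  // With a NaN second operand they differ: the compare-and-select form
  // simply keeps its first argument, the propagating form returns NaN.
  printf("pmax(2, NaN) = %g, max(2, NaN) = %g\n",
         pmax_f32(2.0f, NAN), nan_propagating_max_f32(2.0f, NAN));
  return 0;
}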
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasmsimd-x86-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasmsimd_x86_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,741 | 25.8 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-minmax-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_minmax_ukernel__wasmsimd_x86_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.max);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
v128_t vy0 = wasm_f32x4_add(va0, vb);
v128_t vy1 = wasm_f32x4_add(va1, vb);
vy0 = wasm_f32x4_pmax(voutput_min, vy0);
vy1 = wasm_f32x4_pmax(voutput_min, vy1);
vy0 = wasm_f32x4_pmin(voutput_max, vy0);
vy1 = wasm_f32x4_pmin(voutput_max, vy1);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_f32x4_pmax(voutput_min, vy);
vy = wasm_f32x4_pmin(voutput_max, vy);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,288 | 26.25 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va + vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
}
}
| 950 | 23.384615 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
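    // With a 2x-unrolled main loop, the only possible remainder is a single float.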
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va + vb;
vacc = math_max_f32(vacc, 0.0f);
*output = vacc;
}
}
| 1,313 | 22.464286 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
vacc2 = math_max_f32(vacc2, 0.0f);
vacc3 = math_max_f32(vacc3, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,602 | 23.287879 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
float vacc4 = va4 + vb;
float vacc5 = va5 + vb;
float vacc6 = va6 + vb;
float vacc7 = va7 + vb;
vacc0 = math_max_f32(vacc0, 0.0f);
vacc1 = math_max_f32(vacc1, 0.0f);
vacc2 = math_max_f32(vacc2, 0.0f);
vacc3 = math_max_f32(vacc3, 0.0f);
vacc4 = math_max_f32(vacc4, 0.0f);
vacc5 = math_max_f32(vacc5, 0.0f);
vacc6 = math_max_f32(vacc6, 0.0f);
vacc7 = math_max_f32(vacc7, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
vacc = math_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,098 | 24.597561 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__wasm_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va + vb;
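    // __builtin_wasm_max_f32 lowers to the single wasm f32.max instruction rather
    // than a libm call.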
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
}
}
| 958 | 23.589744 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-wasm-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__wasm_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output = vacc;
}
}
| 1,341 | 22.964286 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-wasm-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__wasm_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,650 | 24.015152 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-wasm-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__wasm_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
float vacc4 = va4 + vb;
float vacc5 = va5 + vb;
float vacc6 = va6 + vb;
float vacc7 = va7 + vb;
vacc0 = __builtin_wasm_max_f32(vacc0, 0.0f);
vacc1 = __builtin_wasm_max_f32(vacc1, 0.0f);
vacc2 = __builtin_wasm_max_f32(vacc2, 0.0f);
vacc3 = __builtin_wasm_max_f32(vacc3, 0.0f);
vacc4 = __builtin_wasm_max_f32(vacc4, 0.0f);
vacc5 = __builtin_wasm_max_f32(vacc5, 0.0f);
vacc6 = __builtin_wasm_max_f32(vacc6, 0.0f);
vacc7 = __builtin_wasm_max_f32(vacc7, 0.0f);
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
vacc = __builtin_wasm_max_f32(vacc, 0.0f);
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,186 | 25.670732 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__wasmsimd_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
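  // ReLU via signed-integer max: negative floats (sign bit set) read as negative
  // int32 values, so i32x4.max against zero clamps them (including -0.0f) to +0.0f
  // while leaving non-negative values bit-exact.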
const v128_t vzero = wasm_i32x4_const_splat(0);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
v128_t vy0 = wasm_f32x4_add(va0, vb);
v128_t vy1 = wasm_f32x4_add(va1, vb);
v128_t vy2 = wasm_f32x4_add(va2, vb);
v128_t vy3 = wasm_f32x4_add(va3, vb);
vy0 = wasm_i32x4_max(vy0, vzero);
vy1 = wasm_i32x4_max(vy1, vzero);
vy2 = wasm_i32x4_max(vy2, vzero);
vy3 = wasm_i32x4_max(vy3, vzero);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
wasm_v128_store(output + 8, vy2);
wasm_v128_store(output + 12, vy3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_i32x4_max(vy, vzero);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_i32x4_max(vy, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,322 | 26.011628 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__wasmsimd_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vzero = wasm_i32x4_const_splat(0);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_i32x4_max(vy, vzero);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_i32x4_max(vy, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,533 | 23.741935 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-relu-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_relu_ukernel__wasmsimd_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_relu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vzero = wasm_i32x4_const_splat(0);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
v128_t vy0 = wasm_f32x4_add(va0, vb);
v128_t vy1 = wasm_f32x4_add(va1, vb);
vy0 = wasm_i32x4_max(vy0, vzero);
vy1 = wasm_i32x4_max(vy1, vzero);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_i32x4_max(vy, vzero);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
vy = wasm_i32x4_max(vy, vzero);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,975 | 24.333333 | 87 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_ukernel__scalar_x1(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= sizeof(float); batch -= sizeof(float)) {
const float va = *input_a++;
float vacc = va + vb;
*output++ = vacc;
}
}
| 911 | 23 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_ukernel__scalar_x2(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
input_a += 2;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch == sizeof(float));
const float va = *input_a;
float vacc = va + vb;
*output = vacc;
}
}
| 1,196 | 21.584906 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_ukernel__scalar_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
input_a += 4;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,405 | 22.04918 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-scalar-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_ukernel__scalar_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float vb = *input_b;
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float va0 = input_a[0];
const float va1 = input_a[1];
const float va2 = input_a[2];
const float va3 = input_a[3];
const float va4 = input_a[4];
const float va5 = input_a[5];
const float va6 = input_a[6];
const float va7 = input_a[7];
input_a += 8;
float vacc0 = va0 + vb;
float vacc1 = va1 + vb;
float vacc2 = va2 + vb;
float vacc3 = va3 + vb;
float vacc4 = va4 + vb;
float vacc5 = va5 + vb;
float vacc6 = va6 + vb;
float vacc7 = va7 + vb;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output[4] = vacc4;
output[5] = vacc5;
output[6] = vacc6;
output[7] = vacc7;
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float va = *input_a++;
float vacc = va + vb;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,745 | 22.917808 | 76 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_ukernel__wasmsimd_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
const v128_t va2 = wasm_v128_load(input_a + 8);
const v128_t va3 = wasm_v128_load(input_a + 12);
input_a += 16;
v128_t vy0 = wasm_f32x4_add(va0, vb);
v128_t vy1 = wasm_f32x4_add(va1, vb);
v128_t vy2 = wasm_f32x4_add(va2, vb);
v128_t vy3 = wasm_f32x4_add(va3, vb);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
wasm_v128_store(output + 8, vy2);
wasm_v128_store(output + 12, vy3);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
wasm_v128_store(output, vy);
output += 4;
}
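  // The tail loads a full 16-byte vector even when fewer than 4 floats remain;
  // the XNN_OOB_READS annotation on this kernel permits that, and only the valid
  // lanes are stored below.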
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,046 | 24.911392 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-wasmsimd-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_ukernel__wasmsimd_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,409 | 22.898305 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vaddc-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vopc-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vaddc_ukernel__wasmsimd_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_default_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vb = wasm_v128_load32_splat(input_b);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t va0 = wasm_v128_load(input_a);
const v128_t va1 = wasm_v128_load(input_a + 4);
input_a += 8;
v128_t vy0 = wasm_f32x4_add(va0, vb);
v128_t vy1 = wasm_f32x4_add(va1, vb);
wasm_v128_store(output, vy0);
wasm_v128_store(output + 4, vy1);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t va = wasm_v128_load(input_a);
input_a += 4;
v128_t vy = wasm_f32x4_add(va, vb);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t va = wasm_v128_load(input_a);
v128_t vy = wasm_f32x4_add(va, vb);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 1,775 | 23.328767 | 90 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-aarch64-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vdiv_minmax_ukernel__aarch64_neon_x4(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
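  // vdivq_f32 is an AArch64-only NEON instruction (32-bit ARM NEON has no vector
  // divide), hence the aarch64-specific kernel.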
const float32x4_t voutput_min = vld1q_dup_f32(¶ms->scalar.min);
const float32x4_t voutput_max = vld1q_dup_f32(¶ms->scalar.max);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t va = vld1q_f32(input_a); input_a += 4;
const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
float32x4_t vacc = vdivq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t va = vld1q_f32(input_a);
const float32x4_t vb = vld1q_f32(input_b);
float32x4_t vacc = vdivq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 1,774 | 27.629032 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-aarch64-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vdiv_minmax_ukernel__aarch64_neon_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const float32x4_t voutput_min = vld1q_dup_f32(¶ms->scalar.min);
const float32x4_t voutput_max = vld1q_dup_f32(¶ms->scalar.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t va0 = vld1q_f32(input_a); input_a += 4;
const float32x4_t vb0 = vld1q_f32(input_b); input_b += 4;
const float32x4_t va1 = vld1q_f32(input_a); input_a += 4;
const float32x4_t vb1 = vld1q_f32(input_b); input_b += 4;
float32x4_t vacc0 = vdivq_f32(va0, vb0);
float32x4_t vacc1 = vdivq_f32(va1, vb1);
vacc0 = vmaxq_f32(vacc0, voutput_min);
vacc1 = vmaxq_f32(vacc1, voutput_min);
vacc0 = vminq_f32(vacc0, voutput_max);
vacc1 = vminq_f32(vacc1, voutput_max);
vst1q_f32(output, vacc0); output += 4;
vst1q_f32(output, vacc1); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t va = vld1q_f32(input_a); input_a += 4;
const float32x4_t vb = vld1q_f32(input_b); input_b += 4;
float32x4_t vacc = vdivq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t va = vld1q_f32(input_a);
const float32x4_t vb = vld1q_f32(input_b);
float32x4_t vacc = vdivq_f32(va, vb);
vacc = vmaxq_f32(vacc, voutput_min);
vacc = vminq_f32(vacc, voutput_max);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 2,446 | 29.209877 | 89 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vdiv_minmax_ukernel__avx_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m256 vacc0 = _mm256_loadu_ps(input_a);
__m256 vacc1 = _mm256_loadu_ps(input_a + 8);
input_a += 16;
vacc0 = _mm256_div_ps(vacc0, _mm256_loadu_ps(input_b));
vacc1 = _mm256_div_ps(vacc1, _mm256_loadu_ps(input_b + 8));
input_b += 16;
vacc0 = _mm256_max_ps(voutput_min, vacc0);
vacc1 = _mm256_max_ps(voutput_min, vacc1);
vacc0 = _mm256_min_ps(voutput_max, vacc0);
vacc1 = _mm256_min_ps(voutput_max, vacc1);
_mm256_storeu_ps(output, vacc0);
_mm256_storeu_ps(output + 8, vacc1);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vacc = _mm256_loadu_ps(input_a);
input_a += 8;
vacc = _mm256_div_ps(vacc, _mm256_loadu_ps(input_b));
input_b += 8;
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
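    // Load a lane mask covering exactly the 1-7 remaining floats so the masked
    // loads never read past the end of either input.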
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
__m256 vacc = _mm256_maskload_ps(input_a, vmask);
const __m256 vb = _mm256_maskload_ps(input_b, vmask);
vacc = _mm256_div_ps(vacc, vb);
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,743 | 28.505376 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vdiv_minmax_ukernel__avx_x8(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m256 voutput_min = _mm256_load_ps(params->avx.min);
const __m256 voutput_max = _mm256_load_ps(params->avx.max);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m256 vacc = _mm256_loadu_ps(input_a);
input_a += 8;
vacc = _mm256_div_ps(vacc, _mm256_loadu_ps(input_b));
input_b += 8;
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) ¶ms->avx.mask_table[7] - batch));
__m256 vacc = _mm256_maskload_ps(input_a, vmask);
const __m256 vb = _mm256_maskload_ps(input_b, vmask);
vacc = _mm256_div_ps(vacc, vb);
vacc = _mm256_max_ps(voutput_min, vacc);
vacc = _mm256_min_ps(voutput_max, vacc);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,124 | 28.109589 | 112 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-avx512f-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vdiv_minmax_ukernel__avx512f_x16(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc = _mm512_loadu_ps(input_a);
input_a += 16;
vacc = _mm512_div_ps(vacc, _mm512_loadu_ps(input_b));
input_b += 16;
vacc = _mm512_max_ps(voutput_min, vacc);
vacc = _mm512_min_ps(voutput_max, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
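    // Zero-masking keeps inactive lanes at zero through the load/div/clamp chain;
    // the final masked store writes only the remaining elements.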
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
vacc = _mm512_maskz_div_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 1,906 | 29.758065 | 105 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vbinary/gen/f32-vdiv-minmax-avx512f-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vbinary/vop-avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>
void xnn_f32_vdiv_minmax_ukernel__avx512f_x32(
size_t batch,
const float* input_a,
const float* input_b,
float* output,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m512 voutput_min = _mm512_set1_ps(params->scalar.min);
const __m512 voutput_max = _mm512_set1_ps(params->scalar.max);
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vacc0 = _mm512_loadu_ps(input_a);
__m512 vacc1 = _mm512_loadu_ps(input_a + 16);
input_a += 32;
vacc0 = _mm512_div_ps(vacc0, _mm512_loadu_ps(input_b));
vacc1 = _mm512_div_ps(vacc1, _mm512_loadu_ps(input_b + 16));
input_b += 32;
vacc0 = _mm512_max_ps(voutput_min, vacc0);
vacc1 = _mm512_max_ps(voutput_min, vacc1);
vacc0 = _mm512_min_ps(voutput_max, vacc0);
vacc1 = _mm512_min_ps(voutput_max, vacc1);
_mm512_storeu_ps(output, vacc0);
_mm512_storeu_ps(output + 16, vacc1);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc = _mm512_loadu_ps(input_a);
input_a += 16;
vacc = _mm512_div_ps(vacc, _mm512_loadu_ps(input_b));
input_b += 16;
vacc = _mm512_max_ps(voutput_min, vacc);
vacc = _mm512_min_ps(voutput_max, vacc);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input_a);
vacc = _mm512_maskz_div_ps(vmask, vacc, _mm512_maskz_loadu_ps(vmask, input_b));
vacc = _mm512_maskz_max_ps(vmask, voutput_min, vacc);
vacc = _mm512_maskz_min_ps(vmask, voutput_max, vacc);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 2,527 | 29.829268 | 105 |
c
|