repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) over `batch` bytes of f32 input
// and writes the total of all outputs to *sum (the softmax numerator pass:
// Reduce-ADD + STORE + EXP-MINUS-MAX).  Main loop: 20 floats (5x4) per pass.
//
// exp(x) is evaluated as 2**n * p(t): n = round(x * log2(e)) is obtained via
// the magic-bias trick, t = x - n*ln(2) uses a split hi/lo representation of
// ln(2) for precision (the "rr2" two-step reduction), and p is the degree-5
// polynomial with coefficients c1..c5 ("p5") evaluated in Horner form.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x20(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch counts bytes, not elements
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the scalar maximum into all four lanes.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  // NOTE(review): the 64-bit splats imply each params field stores its scalar
  // duplicated as an 8-byte pair -- confirm against xnn_f32_expminus_params.
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);  // running sum of exponentials
  for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
    // Load 20 (5x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    const v128_t vi89AB = wasm_v128_load(input + 8);
    const v128_t viCDEF = wasm_v128_load(input + 12);
    const v128_t viGHIJ = wasm_v128_load(input + 16);
    input += 20;

    // x := i - max, so exp() sees non-positive arguments (no overflow).
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    const v128_t vx89AB = wasm_f32x4_sub(vi89AB, vi_max);
    const v128_t vxCDEF = wasm_f32x4_sub(viCDEF, vi_max);
    const v128_t vxGHIJ = wasm_f32x4_sub(viGHIJ, vi_max);

    // n := round(x * log2(e)): adding the magic bias leaves the rounded
    // integer in the low mantissa bits of vn.
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vx89AB, vlog2e), vmagic_bias);
    v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vxCDEF, vlog2e), vmagic_bias);
    v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vxGHIJ, vlog2e), vmagic_bias);

    // s := 2**n, built by shifting n into the float exponent field (bit 23+).
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);

    // Remove the magic bias to recover n as a float.
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);

    // t := x - n*ln(2), applied in two steps (hi then lo part of ln(2))
    // to keep the reduction accurate.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vx89AB);
    v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vxCDEF);
    v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vxGHIJ);

    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
    vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
    vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);

    // Horner evaluation of p := c1 + c2*t + c3*t^2 + c4*t^3 + c5*t^4
    // (the polynomial's implicit leading 1 is folded in below via
    // f = s + (t*s)*p, i.e. f = s * (1 + t*p)).
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt89AB), vc4);
    v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtCDEF), vc4);
    v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc5, vtGHIJ), vc4);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);

    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc1);
    vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc1);
    vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc1);

    // f := s + (t*s)*p ~= s * exp(t) = exp(x).
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);

    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    v128_t vf89AB = wasm_f32x4_add(wasm_f32x4_mul(vt89AB, vp89AB), vs89AB);
    v128_t vfCDEF = wasm_f32x4_add(wasm_f32x4_mul(vtCDEF, vpCDEF), vsCDEF);
    v128_t vfGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vtGHIJ, vpGHIJ), vsGHIJ);

    // Flush lanes whose input is below the denormal cutoff to exactly zero.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_lt(vx89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_lt(vxCDEF, vdenorm_cutoff));
    vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_lt(vxGHIJ, vdenorm_cutoff));

    // Store the 20 outputs and fold them into the running sum.
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    wasm_v128_store(output + 8, vf89AB);
    wasm_v128_store(output + 12, vfCDEF);
    wasm_v128_store(output + 16, vfGHIJ);
    output += 20;

    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
    vacc0 = wasm_f32x4_add(vacc0, vf89AB);
    vacc0 = wasm_f32x4_add(vacc0, vfCDEF);
    vacc0 = wasm_f32x4_add(vacc0, vfGHIJ);
  }

  v128_t vacc = vacc0;
  // Mop-up loop: one vector of 4 floats at a time (same math as above).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }

  // Horizontal reduction of vacc: fold the high 64 bits onto the low 64,
  // then add the two remaining lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);

  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    // 1-3 trailing floats: load a full vector (reading up to 3 floats past
    // the end is permitted by the XNN_OOB_READS annotation), compute as
    // above, then store/accumulate only the valid lanes.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Emit lanes 0-1, then shift lanes 2-3 down for the possible last lane.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 9,346 | 41.294118 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) over `batch` bytes of f32 input
// and writes the total of all outputs to *sum.  x4 variant: one vector of 4
// floats per main-loop iteration.  See the x20 variant for the algorithm
// (magic-bias 2**n extraction, hi/lo ln(2) reduction, degree-5 polynomial).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x4(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch counts bytes, not elements
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the scalar maximum; load the (pair-duplicated) constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);  // running sum of exponentials
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    // Load 4 (1x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    input += 4;
    // x := i - max; n := round(x*log2e) via magic bias; s := 2**n.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    // t := x - n*ln(2), two-step hi/lo reduction.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    // Horner polynomial c5..c1, then f := s + (t*s)*p ~= exp(x).
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    // Flush below-cutoff lanes to zero, store, and accumulate.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    output += 4;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
  }

  v128_t vacc = vacc0;
  // NOTE: this mop-up loop is unreachable in the x4 variant -- the main loop
  // above already consumed every full group of 4 under the identical
  // `batch >= 4 * sizeof(float)` guard.  It is retained by the code
  // generator for uniformity with the wider (x8/x20) variants.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }

  // Horizontal reduction: fold high 64 bits onto low, then add the two lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);

  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    // 1-3 trailing floats: full-vector load is permitted by XNN_OOB_READS;
    // only the valid lanes are stored and summed below.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Emit lanes 0-1, then shift lanes 2-3 down for the possible last lane.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 5,312 | 32.840764 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) over `batch` bytes of f32 input
// and writes the total of all outputs to *sum.  x8_acc2 variant: 8 floats
// (2x4) per main-loop iteration with TWO independent sum accumulators to
// shorten the floating-point add dependency chain.  See the x20 variant for
// the algorithm (magic-bias 2**n, hi/lo ln(2) reduction, degree-5 poly).
//
// FIX: the second accumulator vacc1 was declared and included in the final
// "add up all accumulators" step but never updated -- both vector sums went
// into vacc0, making the acc2 scheme inoperative.  vf4567 now accumulates
// into vacc1, as the variant name and the combining step intend.
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x8_acc2(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch counts bytes, not elements
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the scalar maximum; load the (pair-duplicated) constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  // Two independent running sums (combined after the main loop).
  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);
  v128_t vacc1 = vacc0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 (2x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    input += 8;
    // x := i - max; n := round(x*log2e) via magic bias; s := 2**n.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    // t := x - n*ln(2), two-step hi/lo reduction.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    // Horner polynomial c5..c1, then f := s + (t*s)*p ~= exp(x).
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    // Flush below-cutoff lanes to zero, store, and accumulate.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    output += 8;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc1 = wasm_f32x4_add(vacc1, vf4567);  // FIX: was vacc0 (vacc1 unused)
  }
  // Add up all accumulators to vacc0
  vacc0 = wasm_f32x4_add(vacc0, vacc1);

  v128_t vacc = vacc0;
  // Mop-up loop: one vector of 4 floats at a time (same math as above).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }

  // Horizontal reduction: fold high 64 bits onto low, then add the two lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);

  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    // 1-3 trailing floats: full-vector load is permitted by XNN_OOB_READS;
    // only the valid lanes are stored and summed below.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Emit lanes 0-1, then shift lanes 2-3 down for the possible last lane.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 6,425 | 35.511364 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-raddstoreexpminusmax/gen/f32-raddstoreexpminusmax-wasmsimd-rr2-p5-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-raddstoreexpminusmax/wasmsimd-rr2-p5.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>
// Computes output[i] = exp(input[i] - *max) over `batch` bytes of f32 input
// and writes the total of all outputs to *sum.  x8 variant: 8 floats (2x4)
// per main-loop iteration, single sum accumulator.  See the x20 variant for
// the algorithm (magic-bias 2**n, hi/lo ln(2) reduction, degree-5 poly).
void xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x8(
    size_t batch,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch counts bytes, not elements
  assert(input != NULL);
  assert(max != NULL);
  assert(output != NULL);
  assert(sum != NULL);

  // Broadcast the scalar maximum; load the (pair-duplicated) constants.
  const v128_t vi_max = wasm_v128_load32_splat(max);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.log2e);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  v128_t vacc0 = wasm_f32x4_const_splat(0.0f);  // running sum of exponentials
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    // Load 8 (2x4) inputs at a time.
    const v128_t vi0123 = wasm_v128_load(input);
    const v128_t vi4567 = wasm_v128_load(input + 4);
    input += 8;
    // x := i - max; n := round(x*log2e) via magic bias; s := 2**n.
    const v128_t vx0123 = wasm_f32x4_sub(vi0123, vi_max);
    const v128_t vx4567 = wasm_f32x4_sub(vi4567, vi_max);
    v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vlog2e), vmagic_bias);
    v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vlog2e), vmagic_bias);
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    // t := x - n*ln(2), two-step hi/lo reduction.
    v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vx0123);
    v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vx4567);
    vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
    vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
    // Horner polynomial c5..c1, then f := s + (t*s)*p ~= exp(x).
    v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt0123), vc4);
    v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt4567), vc4);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc1);
    vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc1);
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    v128_t vf0123 = wasm_f32x4_add(wasm_f32x4_mul(vt0123, vp0123), vs0123);
    v128_t vf4567 = wasm_f32x4_add(wasm_f32x4_mul(vt4567, vp4567), vs4567);
    // Flush below-cutoff lanes to zero, store, and accumulate.
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_lt(vx0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_lt(vx4567, vdenorm_cutoff));
    wasm_v128_store(output, vf0123);
    wasm_v128_store(output + 4, vf4567);
    output += 8;
    vacc0 = wasm_f32x4_add(vacc0, vf0123);
    vacc0 = wasm_f32x4_add(vacc0, vf4567);
  }

  v128_t vacc = vacc0;
  // Mop-up loop: one vector of 4 floats at a time (same math as above).
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vi = wasm_v128_load(input);
    input += 4;
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    wasm_v128_store(output, vf);
    output += 4;
    vacc = wasm_f32x4_add(vacc, vf);
  }

  // Horizontal reduction: fold high 64 bits onto low, then add the two lanes.
  vacc = wasm_f32x4_add(vacc, wasm_v64x2_shuffle(vacc, vacc, 1, 1));
  float vsum = wasm_f32x4_extract_lane(vacc, 0) + wasm_f32x4_extract_lane(vacc, 1);

  if (batch != 0) {
    assert(batch >= 1 * sizeof(float));
    assert(batch <= 3 * sizeof(float));

    // 1-3 trailing floats: full-vector load is permitted by XNN_OOB_READS;
    // only the valid lanes are stored and summed below.
    const v128_t vi = wasm_v128_load(input);
    const v128_t vx = wasm_f32x4_sub(vi, vi_max);
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vx, vlog2e), vmagic_bias);
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vx);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc5, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc1);
    vt = wasm_f32x4_mul(vt, vs);
    v128_t vf = wasm_f32x4_add(wasm_f32x4_mul(vt, vp), vs);
    vf = wasm_v128_andnot(vf, wasm_f32x4_lt(vx, vdenorm_cutoff));
    if (batch & (2 * sizeof(float))) {
      // Emit lanes 0-1, then shift lanes 2-3 down for the possible last lane.
      wasm_v128_store64_lane(output, vf, 0);
      output += 2;
      vsum += wasm_f32x4_extract_lane(vf, 0) + wasm_f32x4_extract_lane(vf, 1);
      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (batch & (1 * sizeof(float))) {
      wasm_v128_store32_lane(output, vf, 0);
      vsum += wasm_f32x4_extract_lane(vf, 0);
    }
  }
  *sum = vsum;
}
| 6,318 | 35.526012 | 94 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rmax/f32-rmax-avx.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/rmax.h>
// Writes max(input[0..batch/4-1]) to *output.  `batch` is a byte count and
// must be a nonzero multiple of sizeof(float).  AVX path: four independent
// 8-lane accumulators (32 floats / 128 bytes per main-loop iteration).
void xnn_f32_rmax_ukernel__avx(
    size_t batch,
    const float* input,
    float* output)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed every accumulator with the first element, so the result is always
  // an actual element of the input.
  __m256 vacc0 = _mm256_broadcast_ss(input);
  __m256 vacc1 = vacc0;
  __m256 vacc2 = vacc0;
  __m256 vacc3 = vacc0;
  // Main loop: 32 floats (128 bytes) per iteration.
  while (batch >= 128) {
    vacc0 = _mm256_max_ps(vacc0, _mm256_loadu_ps(input));
    vacc1 = _mm256_max_ps(vacc1, _mm256_loadu_ps(input + 8));
    vacc2 = _mm256_max_ps(vacc2, _mm256_loadu_ps(input + 16));
    vacc3 = _mm256_max_ps(vacc3, _mm256_loadu_ps(input + 24));
    input += 32;
    batch -= 128;
  }
  // Collapse the four accumulators, then mop up remaining full 8-lane loads.
  __m256 vresult = _mm256_max_ps(_mm256_max_ps(vacc0, vacc1), _mm256_max_ps(vacc2, vacc3));
  while (batch >= 32) {
    vresult = _mm256_max_ps(vresult, _mm256_loadu_ps(input));
    input += 8;
    batch -= 32;
  }
  // Horizontal reduction: 256 -> 128 bits, then 4 -> 2 -> 1 lanes.
  __m128 vred = _mm_max_ps(_mm256_castps256_ps128(vresult), _mm256_extractf128_ps(vresult, 1));
  vred = _mm_max_ps(vred, _mm_movehl_ps(vred, vred));
  vred = _mm_max_ss(vred, _mm_shuffle_ps(vred, vred, _MM_SHUFFLE(3, 3, 1, 1)));
  // Scalar tail: fewer than 8 floats remain (batch counts 4 bytes per float).
  while (batch != 0) {
    vred = _mm_max_ss(vred, _mm_load_ss(input));
    input += 1;
    batch -= 4;
  }
  _mm_store_ss(output, vred);
}
| 1,659 | 28.122807 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rmax/f32-rmax-neon.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/rmax.h>
// Writes the maximum of `batch` bytes of f32 input to *output.  `batch` must
// be a nonzero multiple of sizeof(float).  NEON path: four independent 4-lane
// accumulators, 16 floats (64 bytes) per main-loop iteration.
void xnn_f32_rmax_ukernel__neon(
    size_t batch,
    const float* input,
    float* output)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch counts bytes, not elements
  assert(input != NULL);
  assert(output != NULL);

  // Seed all accumulators with the first element, so the result is always an
  // actual element of the input.
  float32x4_t vmax0 = vld1q_dup_f32(input);
  float32x4_t vmax1 = vmax0;
  float32x4_t vmax2 = vmax0;
  float32x4_t vmax3 = vmax0;
  // Main loop: 16 floats (64 bytes) per iteration across four accumulators.
  for (; batch >= 64; batch -= 64) {
    const float32x4_t vx0 = vld1q_f32(input); input += 4;
    const float32x4_t vx1 = vld1q_f32(input); input += 4;
    const float32x4_t vx2 = vld1q_f32(input); input += 4;
    const float32x4_t vx3 = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vx0);
    vmax1 = vmaxq_f32(vmax1, vx1);
    vmax2 = vmaxq_f32(vmax2, vx2);
    vmax3 = vmaxq_f32(vmax3, vx3);
  }
  // Collapse the four accumulators, then mop up remaining full vectors of 4.
  float32x4_t vmax = vmaxq_f32(vmaxq_f32(vmax0, vmax1), vmaxq_f32(vmax2, vmax3));
  for (; batch >= 16; batch -= 16) {
    const float32x4_t vx = vld1q_f32(input); input += 4;
    vmax = vmaxq_f32(vmax, vx);
  }
  // Reduce 4 lanes to 2: A64 has a pairwise max across the full quad;
  // A32 maxes the low half against the high half.
#if XNN_ARCH_ARM64
  float32x2_t vmax_lo = vget_low_f32(vpmaxq_f32(vmax, vmax));
#else
  float32x2_t vmax_lo = vmax_f32(vget_low_f32(vmax), vget_high_f32(vmax));
#endif
  // Scalar tail: 1-3 floats, each broadcast and folded into both lanes.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float32x2_t vx = vld1_dup_f32(input); input += 1;
      vmax_lo = vmax_f32(vmax_lo, vx);
      batch -= 4;
    } while (batch != 0);
  }
  // Final 2 -> 1 lane reduction and store.
#if XNN_ARCH_ARM64
  *output = vmaxv_f32(vmax_lo);
#else
  vst1_lane_f32(output, vpmax_f32(vmax_lo, vmax_lo), 0);
#endif
}
| 1,712 | 26.629032 | 81 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rmax/f32-rmax-scalar.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/rmax.h>
// Writes the maximum of `batch` bytes of f32 input to *output.  `batch` must
// be a nonzero multiple of sizeof(float).  Scalar path with four independent
// running maxima to break the dependency chain (4 floats per iteration).
void xnn_f32_rmax_ukernel__scalar(
    size_t batch,
    const float* input,
    float* output)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed every partial maximum with the first element, so the result is
  // always an actual element of the input.
  float vm0 = *input;
  float vm1 = vm0;
  float vm2 = vm0;
  float vm3 = vm0;
  // Main loop: 4 floats (16 bytes) per iteration.
  while (batch >= 16) {
    vm0 = math_max_f32(input[0], vm0);
    vm1 = math_max_f32(input[1], vm1);
    vm2 = math_max_f32(input[2], vm2);
    vm3 = math_max_f32(input[3], vm3);
    input += 4;
    batch -= 16;
  }
  // Collapse the four partial maxima into one.
  float vresult = math_max_f32(math_max_f32(vm0, vm1), math_max_f32(vm2, vm3));
  // Scalar tail: 1-3 floats remain.
  while (batch != 0) {
    vresult = math_max_f32(*input++, vresult);
    batch -= 4;
  }
  *output = vresult;
}
| 1,207 | 23.16 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rmax/f32-rmax-sse.c
|
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/rmax.h>
// Writes the maximum of `batch` bytes of f32 input to *output.  `batch` must
// be a nonzero multiple of sizeof(float).  SSE path: four independent 4-lane
// accumulators, 16 floats (64 bytes) per main-loop iteration.
void xnn_f32_rmax_ukernel__sse(
    size_t batch,
    const float* input,
    float* output)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);  // batch counts bytes, not elements
  assert(input != NULL);
  assert(output != NULL);

  // Load the first element and broadcast it into all four lanes, so each
  // accumulator starts from an actual element of the input.
  __m128 vmax0 = _mm_load_ss(input);
  vmax0 = _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax1 = vmax0;
  __m128 vmax2 = vmax0;
  __m128 vmax3 = vmax0;
  // Main loop: 16 floats (64 bytes) per iteration across four accumulators.
  for (; batch >= 64; batch -= 64) {
    const __m128 vx0 = _mm_loadu_ps(input);
    const __m128 vx1 = _mm_loadu_ps(input + 4);
    const __m128 vx2 = _mm_loadu_ps(input + 8);
    const __m128 vx3 = _mm_loadu_ps(input + 12);
    input += 16;
    vmax0 = _mm_max_ps(vmax0, vx0);
    vmax1 = _mm_max_ps(vmax1, vx1);
    vmax2 = _mm_max_ps(vmax2, vx2);
    vmax3 = _mm_max_ps(vmax3, vx3);
  }
  // Collapse the four accumulators, then mop up remaining full vectors of 4.
  __m128 vmax = _mm_max_ps(_mm_max_ps(vmax0, vmax1), _mm_max_ps(vmax2, vmax3));
  for (; batch >= 16; batch -= 16) {
    const __m128 vx = _mm_loadu_ps(input);
    vmax = _mm_max_ps(vmax, vx);
    input += 4;
  }
  // Horizontal reduction: 4 -> 2 lanes (high pair onto low), then 2 -> 1.
  __m128 vmax_lo = _mm_max_ps(vmax, _mm_movehl_ps(vmax, vmax));
  vmax_lo = _mm_max_ss(vmax_lo, _mm_shuffle_ps(vmax_lo, vmax_lo, _MM_SHUFFLE(3, 3, 1, 1)));
  // Scalar tail: 1-3 floats, folded into lane 0 only.
  if XNN_UNLIKELY(batch != 0) {
    do {
      vmax_lo = _mm_max_ss(vmax_lo, _mm_load_ss(input));
      input += 1;
      batch -= 4;
    } while (batch != 0);
  }
  _mm_store_ss(output, vmax_lo);
}
| 1,579 | 26.719298 | 91 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rmax/f32-rmax-wasmsimd-arm.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/rmax.h>
// Computes the maximum of a batch of floats with WAsm SIMD, using
// wasm_f32x4_max (ARM-flavored lowering). |batch| is the input size in
// BYTES. Main loop: 16 floats per iteration into four accumulators;
// then a 4-float loop; then a scalar tail.
void xnn_f32_rmax_ukernel__wasmsimd_arm(
    size_t batch,
    const float* input,
    float* output)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  v128_t vmax2 = vmax0;
  v128_t vmax3 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    input += 16;
    vmax0 = wasm_f32x4_max(vmax0, vx0);
    vmax1 = wasm_f32x4_max(vmax1, vx1);
    vmax2 = wasm_f32x4_max(vmax2, vx2);
    vmax3 = wasm_f32x4_max(vmax3, vx3);
  }
  // Pairwise fold of the four partial vectors.
  v128_t vmax0123 = wasm_f32x4_max(wasm_f32x4_max(vmax0, vmax1), wasm_f32x4_max(vmax2, vmax3));
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    vmax0123 = wasm_f32x4_max(vmax0123, vx);
    input += 4;
  }
  // Horizontal reduction: rotate lanes by two and max (lane 0 = max{0,2},
  // lane 1 = max{1,3}), then take the scalar max of lanes 0 and 1.
  vmax0123 = wasm_f32x4_max(vmax0123, wasm_v32x4_shuffle(vmax0123, vmax0123, 2, 3, 0, 1));
  float vmax = __builtin_wasm_max_f32(wasm_f32x4_extract_lane(vmax0123, 0), wasm_f32x4_extract_lane(vmax0123, 1));
  // Scalar tail for the last 1-3 elements.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vx = *input++;
      vmax = __builtin_wasm_max_f32(vx, vmax);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  *output = vmax;
}
| 1,705 | 29.464286 | 114 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rmax/f32-rmax-wasmsimd-x86.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/rmax.h>
// Computes the maximum of a batch of floats with WAsm SIMD, using
// wasm_f32x4_pmax (pseudo-max, x86-friendly lowering). pmax is asymmetric
// in its operands (unlike the IEEE-style f32x4.max), so the operand order
// below — new value first, accumulator second — is deliberate; do not
// swap the arguments. |batch| is the input size in BYTES.
void xnn_f32_rmax_ukernel__wasmsimd_x86(
    size_t batch,
    const float* input,
    float* output)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  v128_t vmax2 = vmax0;
  v128_t vmax3 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    input += 16;
    vmax0 = wasm_f32x4_pmax(vx0, vmax0);
    vmax1 = wasm_f32x4_pmax(vx1, vmax1);
    vmax2 = wasm_f32x4_pmax(vx2, vmax2);
    vmax3 = wasm_f32x4_pmax(vx3, vmax3);
  }
  // Pairwise fold of the four partial vectors.
  const v128_t vmax01 = wasm_f32x4_pmax(vmax1, vmax0);
  const v128_t vmax23 = wasm_f32x4_pmax(vmax3, vmax2);
  v128_t vmax0123 = wasm_f32x4_pmax(vmax23, vmax01);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    vmax0123 = wasm_f32x4_pmax(vx, vmax0123);
    input += 4;
  }
  // Horizontal reduction: rotate lanes by two and pmax, then take the
  // scalar max of lanes 0 and 1.
  const v128_t vmax2301 = wasm_v32x4_shuffle(vmax0123, vmax0123, 2, 3, 0, 1);
  vmax0123 = wasm_f32x4_pmax(vmax2301, vmax0123);
  float vmax = math_max_f32(wasm_f32x4_extract_lane(vmax0123, 0), wasm_f32x4_extract_lane(vmax0123, 1));
  // Scalar tail for the last 1-3 elements.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vx = *input++;
      vmax = math_max_f32(vx, vmax);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  *output = vmax;
}
| 1,820 | 29.35 | 104 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-neon-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with NEON: 12 floats per
// main-loop iteration across three accumulators. |batch| is the input
// size in BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__neon_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  float32x4_t vmax0 = vld1q_dup_f32(input);
  float32x4_t vmax1 = vmax0;
  float32x4_t vmax2 = vmax0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt0);
    vmax1 = vmaxq_f32(vmax1, vt1);
    vmax2 = vmaxq_f32(vmax2, vt2);
  }
  // Fold the three partial vectors into one.
  vmax0 = vmaxq_f32(vmax0, vmax1);
  vmax0 = vmaxq_f32(vmax0, vmax2);
  // Mop up remaining full 4-float groups.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction: low half vs high half, then a 2-float tail,
  // then pairwise max, then a 1-float tail.
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmax = vmax_f32(vmax, vt);
  }
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmax, 0);
}
| 1,742 | 28.542373 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-neon-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with NEON: 16 floats per
// main-loop iteration into two accumulators (each takes two of the four
// loaded vectors). |batch| is the input size in BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__neon_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  float32x4_t vmax0 = vld1q_dup_f32(input);
  float32x4_t vmax1 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    const float32x4_t vt3 = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt0);
    vmax1 = vmaxq_f32(vmax1, vt1);
    vmax0 = vmaxq_f32(vmax0, vt2);
    vmax1 = vmaxq_f32(vmax1, vt3);
  }
  // Fold the two partial vectors into one.
  vmax0 = vmaxq_f32(vmax0, vmax1);
  // Mop up remaining full 4-float groups.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction interleaved with the 2- and 1-float tails.
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmax = vmax_f32(vmax, vt);
  }
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmax, 0);
}
| 1,771 | 29.033898 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-neon-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with NEON: 16 floats per
// main-loop iteration across four independent accumulators. |batch| is
// the input size in BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__neon_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  float32x4_t vmax0 = vld1q_dup_f32(input);
  float32x4_t vmax1 = vmax0;
  float32x4_t vmax2 = vmax0;
  float32x4_t vmax3 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    const float32x4_t vt3 = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt0);
    vmax1 = vmaxq_f32(vmax1, vt1);
    vmax2 = vmaxq_f32(vmax2, vt2);
    vmax3 = vmaxq_f32(vmax3, vt3);
  }
  // Pairwise fold of the four partial vectors.
  vmax0 = vmaxq_f32(vmax0, vmax1);
  vmax2 = vmaxq_f32(vmax2, vmax3);
  vmax0 = vmaxq_f32(vmax0, vmax2);
  // Mop up remaining full 4-float groups.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction interleaved with the 2- and 1-float tails.
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmax = vmax_f32(vmax, vt);
  }
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmax, 0);
}
| 1,899 | 29.15873 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with NEON: one 4-float vector
// per loop iteration, single accumulator. |batch| is the input size in
// BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__neon_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  float32x4_t vmax0 = vld1q_dup_f32(input);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction interleaved with the 2- and 1-float tails.
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmax = vmax_f32(vmax, vt);
  }
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmax, 0);
}
| 1,255 | 26.304348 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-neon-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with NEON: 8 floats per
// main-loop iteration into two accumulators. |batch| is the input size
// in BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__neon_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  float32x4_t vmax0 = vld1q_dup_f32(input);
  float32x4_t vmax1 = vmax0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt0);
    vmax1 = vmaxq_f32(vmax1, vt1);
  }
  // Fold the two partial vectors into one.
  vmax0 = vmaxq_f32(vmax0, vmax1);
  // Mop up remaining full 4-float groups.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction interleaved with the 2- and 1-float tails.
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmax = vmax_f32(vmax, vt);
  }
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmax, 0);
}
| 1,582 | 27.781818 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Streaming scalar max reduction: one accumulator, one element per step.
// |batch| is the input size in BYTES and must be a non-zero multiple of
// sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__scalar_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seeding with the first element keeps all-negative inputs correct.
  float acc = *input;
  while (batch != 0) {
    acc = math_max_f32(acc, *input++);
    batch -= sizeof(float);
  }
  output[0] = acc;
}
| 816 | 21.694444 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-scalar-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar max reduction, two elements per main-loop iteration into two
// independent accumulators. |batch| is the input size in BYTES and must
// be a non-zero multiple of sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__scalar_x2_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Both accumulators start at the first element (handles all-negative data).
  float acc_even = input[0];
  float acc_odd = acc_even;
  while (batch >= 2 * sizeof(float)) {
    acc_even = math_max_f32(acc_even, input[0]);
    acc_odd = math_max_f32(acc_odd, input[1]);
    input += 2;
    batch -= 2 * sizeof(float);
  }
  acc_even = math_max_f32(acc_even, acc_odd);
  // At most one trailing element remains.
  if (batch != 0) {
    acc_even = math_max_f32(acc_even, *input);
  }
  output[0] = acc_even;
}
| 1,086 | 22.630435 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-scalar-x3-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar max reduction, three elements per main-loop iteration into
// three independent accumulators. |batch| is the input size in BYTES and
// must be a non-zero multiple of sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__scalar_x3_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // All accumulators start at the first element (handles all-negative data).
  float acc0 = input[0];
  float acc1 = acc0;
  float acc2 = acc0;
  while (batch >= 3 * sizeof(float)) {
    acc0 = math_max_f32(acc0, input[0]);
    acc1 = math_max_f32(acc1, input[1]);
    acc2 = math_max_f32(acc2, input[2]);
    input += 3;
    batch -= 3 * sizeof(float);
  }
  // Fold the partial maxima together.
  acc0 = math_max_f32(math_max_f32(acc0, acc1), acc2);
  // At most two trailing elements remain.
  while (batch != 0) {
    acc0 = math_max_f32(acc0, *input++);
    batch -= sizeof(float);
  }
  output[0] = acc0;
}
| 1,288 | 23.320755 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-scalar-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar max reduction, four elements per main-loop iteration shared
// between two accumulators (elements 0/2 feed one, 1/3 feed the other).
// |batch| is the input size in BYTES and must be a non-zero multiple of
// sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__scalar_x4_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Both accumulators start at the first element (handles all-negative data).
  float acc_even = input[0];
  float acc_odd = acc_even;
  while (batch >= 4 * sizeof(float)) {
    acc_even = math_max_f32(acc_even, input[0]);
    acc_odd = math_max_f32(acc_odd, input[1]);
    acc_even = math_max_f32(acc_even, input[2]);
    acc_odd = math_max_f32(acc_odd, input[3]);
    input += 4;
    batch -= 4 * sizeof(float);
  }
  acc_even = math_max_f32(acc_even, acc_odd);
  // At most three trailing elements remain.
  while (batch != 0) {
    acc_even = math_max_f32(acc_even, *input++);
    batch -= sizeof(float);
  }
  output[0] = acc_even;
}
| 1,297 | 23.490566 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-scalar-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar max reduction, four elements per main-loop iteration into four
// independent accumulators. |batch| is the input size in BYTES and must
// be a non-zero multiple of sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__scalar_x4_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // All accumulators start at the first element (handles all-negative data).
  float acc0 = input[0];
  float acc1 = acc0;
  float acc2 = acc0;
  float acc3 = acc0;
  while (batch >= 4 * sizeof(float)) {
    acc0 = math_max_f32(acc0, input[0]);
    acc1 = math_max_f32(acc1, input[1]);
    acc2 = math_max_f32(acc2, input[2]);
    acc3 = math_max_f32(acc3, input[3]);
    input += 4;
    batch -= 4 * sizeof(float);
  }
  // Pairwise fold of the four partial maxima.
  acc0 = math_max_f32(acc0, acc1);
  acc2 = math_max_f32(acc2, acc3);
  acc0 = math_max_f32(acc0, acc2);
  // At most three trailing elements remain.
  while (batch != 0) {
    acc0 = math_max_f32(acc0, *input++);
    batch -= sizeof(float);
  }
  output[0] = acc0;
}
| 1,419 | 23.912281 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-sse-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with SSE: 12 floats per
// main-loop iteration across three accumulators. |batch| is the input
// size in BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__sse_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  __m128 vmax0 = _mm_load_ss(input);
  vmax0 = _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax1 = vmax0;
  __m128 vmax2 = vmax0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    input += 12;
    vmax0 = _mm_max_ps(vmax0, vt0);
    vmax1 = _mm_max_ps(vmax1, vt1);
    vmax2 = _mm_max_ps(vmax2, vt2);
  }
  // Fold the three partial vectors into one.
  vmax0 = _mm_max_ps(vmax0, vmax1);
  vmax0 = _mm_max_ps(vmax0, vmax2);
  // Mop up remaining full 4-float groups.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail (lane 0 only), then horizontal reduction of all 4 lanes.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output , vmax0);
}
| 1,743 | 26.68254 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-sse-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with SSE: 16 floats per
// main-loop iteration shared between two accumulators. |batch| is the
// input size in BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__sse_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  __m128 vmax0 = _mm_load_ss(input);
  vmax0 = _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax1 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    const __m128 vt3 = _mm_loadu_ps(input + 12);
    input += 16;
    vmax0 = _mm_max_ps(vmax0, vt0);
    vmax1 = _mm_max_ps(vmax1, vt1);
    vmax0 = _mm_max_ps(vmax0, vt2);
    vmax1 = _mm_max_ps(vmax1, vt3);
  }
  // Fold the two partial vectors into one.
  vmax0 = _mm_max_ps(vmax0, vmax1);
  // Mop up remaining full 4-float groups.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail (lane 0 only), then horizontal reduction of all 4 lanes.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output , vmax0);
}
| 1,768 | 27.079365 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-sse-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with SSE: 16 floats per
// main-loop iteration across four independent accumulators. |batch| is
// the input size in BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__sse_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  __m128 vmax0 = _mm_load_ss(input);
  vmax0 = _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax1 = vmax0;
  __m128 vmax2 = vmax0;
  __m128 vmax3 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    const __m128 vt3 = _mm_loadu_ps(input + 12);
    input += 16;
    vmax0 = _mm_max_ps(vmax0, vt0);
    vmax1 = _mm_max_ps(vmax1, vt1);
    vmax2 = _mm_max_ps(vmax2, vt2);
    vmax3 = _mm_max_ps(vmax3, vt3);
  }
  // Pairwise fold of the four partial vectors.
  vmax0 = _mm_max_ps(vmax0, vmax1);
  vmax2 = _mm_max_ps(vmax2, vmax3);
  vmax0 = _mm_max_ps(vmax0, vmax2);
  // Mop up remaining full 4-float groups.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail (lane 0 only), then horizontal reduction of all 4 lanes.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output , vmax0);
}
| 1,888 | 27.19403 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with SSE: one 4-float vector
// per loop iteration, single accumulator. |batch| is the input size in
// BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__sse_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  __m128 vmax0 = _mm_load_ss(input);
  vmax0 = _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(0, 0, 0, 0));
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail (lane 0 only), then horizontal reduction of all 4 lanes.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output , vmax0);
}
| 1,278 | 25.102041 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-sse-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of a batch of floats with SSE: 8 floats per
// main-loop iteration into two accumulators. |batch| is the input size
// in BYTES; |params| is unused.
void xnn_f32_rmax_ukernel__sse_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element so all-negative inputs reduce correctly.
  __m128 vmax0 = _mm_load_ss(input);
  vmax0 = _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax1 = vmax0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    input += 8;
    vmax0 = _mm_max_ps(vmax0, vt0);
    vmax1 = _mm_max_ps(vmax1, vt1);
  }
  // Fold the two partial vectors into one.
  vmax0 = _mm_max_ps(vmax0, vmax1);
  // Mop up remaining full 4-float groups.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail (lane 0 only), then horizontal reduction of all 4 lanes.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output , vmax0);
}
| 1,595 | 26.050847 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Streaming scalar max reduction for WebAssembly (uses the f32.max
// builtin). |batch| is the input size in BYTES and must be a non-zero
// multiple of sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__wasm_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seeding with the first element keeps all-negative inputs correct.
  float acc = *input;
  while (batch != 0) {
    acc = __builtin_wasm_max_f32(acc, *input++);
    batch -= sizeof(float);
  }
  output[0] = acc;
}
| 798 | 21.828571 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasm-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WebAssembly scalar max reduction, two elements per main-loop iteration
// into two independent accumulators. |batch| is the input size in BYTES
// and must be a non-zero multiple of sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__wasm_x2_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Both accumulators start at the first element (handles all-negative data).
  float acc_even = input[0];
  float acc_odd = acc_even;
  while (batch >= 2 * sizeof(float)) {
    acc_even = __builtin_wasm_max_f32(acc_even, input[0]);
    acc_odd = __builtin_wasm_max_f32(acc_odd, input[1]);
    input += 2;
    batch -= 2 * sizeof(float);
  }
  acc_even = __builtin_wasm_max_f32(acc_even, acc_odd);
  // At most one trailing element remains.
  if (batch != 0) {
    acc_even = __builtin_wasm_max_f32(acc_even, *input);
  }
  output[0] = acc_even;
}
| 1,098 | 23.422222 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasm-x3-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WebAssembly scalar max reduction, three elements per main-loop
// iteration into three independent accumulators. |batch| is the input
// size in BYTES and must be a non-zero multiple of sizeof(float);
// |params| is unused.
void xnn_f32_rmax_ukernel__wasm_x3_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // All accumulators start at the first element (handles all-negative data).
  float acc0 = input[0];
  float acc1 = acc0;
  float acc2 = acc0;
  while (batch >= 3 * sizeof(float)) {
    acc0 = __builtin_wasm_max_f32(acc0, input[0]);
    acc1 = __builtin_wasm_max_f32(acc1, input[1]);
    acc2 = __builtin_wasm_max_f32(acc2, input[2]);
    input += 3;
    batch -= 3 * sizeof(float);
  }
  // Fold the partial maxima together.
  acc0 = __builtin_wasm_max_f32(__builtin_wasm_max_f32(acc0, acc1), acc2);
  // At most two trailing elements remain.
  while (batch != 0) {
    acc0 = __builtin_wasm_max_f32(acc0, *input++);
    batch -= sizeof(float);
  }
  output[0] = acc0;
}
| 1,320 | 24.403846 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasm-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WebAssembly scalar max reduction, four elements per main-loop iteration
// shared between two accumulators (elements 0/2 feed one, 1/3 the other).
// |batch| is the input size in BYTES and must be a non-zero multiple of
// sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__wasm_x4_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Both accumulators start at the first element (handles all-negative data).
  float acc_even = input[0];
  float acc_odd = acc_even;
  while (batch >= 4 * sizeof(float)) {
    acc_even = __builtin_wasm_max_f32(acc_even, input[0]);
    acc_odd = __builtin_wasm_max_f32(acc_odd, input[1]);
    acc_even = __builtin_wasm_max_f32(acc_even, input[2]);
    acc_odd = __builtin_wasm_max_f32(acc_odd, input[3]);
    input += 4;
    batch -= 4 * sizeof(float);
  }
  acc_even = __builtin_wasm_max_f32(acc_even, acc_odd);
  // At most three trailing elements remain.
  while (batch != 0) {
    acc_even = __builtin_wasm_max_f32(acc_even, *input++);
    batch -= sizeof(float);
  }
  output[0] = acc_even;
}
| 1,329 | 24.576923 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasm-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WebAssembly scalar max reduction, four elements per main-loop iteration
// into four independent accumulators. |batch| is the input size in BYTES
// and must be a non-zero multiple of sizeof(float); |params| is unused.
void xnn_f32_rmax_ukernel__wasm_x4_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // All accumulators start at the first element (handles all-negative data).
  float acc0 = input[0];
  float acc1 = acc0;
  float acc2 = acc0;
  float acc3 = acc0;
  while (batch >= 4 * sizeof(float)) {
    acc0 = __builtin_wasm_max_f32(acc0, input[0]);
    acc1 = __builtin_wasm_max_f32(acc1, input[1]);
    acc2 = __builtin_wasm_max_f32(acc2, input[2]);
    acc3 = __builtin_wasm_max_f32(acc3, input[3]);
    input += 4;
    batch -= 4 * sizeof(float);
  }
  // Pairwise fold of the four partial maxima.
  acc0 = __builtin_wasm_max_f32(acc0, acc1);
  acc2 = __builtin_wasm_max_f32(acc2, acc3);
  acc0 = __builtin_wasm_max_f32(acc0, acc2);
  // At most three trailing elements remain.
  while (batch != 0) {
    acc0 = __builtin_wasm_max_f32(acc0, *input++);
    batch -= sizeof(float);
  }
  output[0] = acc0;
}
| 1,471 | 25.285714 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-minmax-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. WAsm SIMD kernel using f32x4.max (the IEEE
// NaN-propagating maximum): 12 elements per main-loop iteration with
// 3 independent vector accumulators. `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_minmax_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of all accumulators so the
  // reduction is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  v128_t vmax2 = vmax0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    input += 12;
    vmax0 = wasm_f32x4_max(vmax0, vt0);
    vmax1 = wasm_f32x4_max(vmax1, vt1);
    vmax2 = wasm_f32x4_max(vmax2, vt2);
  }
  // Merge the three accumulators.
  vmax0 = wasm_f32x4_max(vmax0, vmax1);
  vmax0 = wasm_f32x4_max(vmax0, vmax2);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1))
;
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 1,867 | 28.650794 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-minmax-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. WAsm SIMD kernel using f32x4.max:
// 16 elements per main-loop iteration folded into 2 vector accumulators.
// `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_minmax_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of both accumulators so the
  // reduction is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    const v128_t vt3 = wasm_v128_load(input + 12);
    input += 16;
    // Alternate the two accumulators across the four vectors.
    vmax0 = wasm_f32x4_max(vmax0, vt0);
    vmax1 = wasm_f32x4_max(vmax1, vt1);
    vmax0 = wasm_f32x4_max(vmax0, vt2);
    vmax1 = wasm_f32x4_max(vmax1, vt3);
  }
  // Merge the two accumulators.
  vmax0 = wasm_f32x4_max(vmax0, vmax1);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 1,894 | 29.079365 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-minmax-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. WAsm SIMD kernel using f32x4.max:
// 16 elements per main-loop iteration, 4 independent vector accumulators.
// `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_minmax_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of all accumulators so the
  // reduction is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  v128_t vmax2 = vmax0;
  v128_t vmax3 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    const v128_t vt3 = wasm_v128_load(input + 12);
    input += 16;
    vmax0 = wasm_f32x4_max(vmax0, vt0);
    vmax1 = wasm_f32x4_max(vmax1, vt1);
    vmax2 = wasm_f32x4_max(vmax2, vt2);
    vmax3 = wasm_f32x4_max(vmax3, vt3);
  }
  // Merge the four accumulators pairwise.
  vmax0 = wasm_f32x4_max(vmax0, vmax1);
  vmax2 = wasm_f32x4_max(vmax2, vmax3);
  vmax0 = wasm_f32x4_max(vmax0, vmax2);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 2,022 | 29.19403 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-minmax-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. WAsm SIMD kernel using f32x4.max with a
// single vector accumulator, 4 elements per iteration. `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_minmax_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane so the reduction
  // is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 1,376 | 27.102041 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-minmax-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. WAsm SIMD kernel using f32x4.max:
// 8 elements per main-loop iteration, 2 independent vector accumulators.
// `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_minmax_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of both accumulators so the
  // reduction is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    input += 8;
    vmax0 = wasm_f32x4_max(vmax0, vt0);
    vmax1 = wasm_f32x4_max(vmax1, vt1);
  }
  // Merge the two accumulators.
  vmax0 = wasm_f32x4_max(vmax0, vmax1);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_max(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_max(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 1,709 | 27.983051 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-pminmax-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Variant of the minmax kernel that uses
// f32x4.pmax (WebAssembly pseudo-maximum, a plain compare-select), which is
// cheaper than f32x4.max but differs from it in NaN/-0 handling.
// 12 elements per main-loop iteration, 3 accumulators. `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_pminmax_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of all accumulators so the
  // reduction is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  v128_t vmax2 = vmax0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    input += 12;
    vmax0 = wasm_f32x4_pmax(vmax0, vt0);
    vmax1 = wasm_f32x4_pmax(vmax1, vt1);
    vmax2 = wasm_f32x4_pmax(vmax2, vt2);
  }
  // Merge the three accumulators.
  vmax0 = wasm_f32x4_pmax(vmax0, vmax1);
  vmax0 = wasm_f32x4_pmax(vmax0, vmax2);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 1,878 | 28.825397 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-pminmax-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Uses f32x4.pmax (WebAssembly pseudo-maximum,
// compare-select; NaN/-0 handling differs from f32x4.max). 16 elements per
// main-loop iteration folded into 2 accumulators. `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_pminmax_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of both accumulators so the
  // reduction is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    const v128_t vt3 = wasm_v128_load(input + 12);
    input += 16;
    // Alternate the two accumulators across the four vectors.
    vmax0 = wasm_f32x4_pmax(vmax0, vt0);
    vmax1 = wasm_f32x4_pmax(vmax1, vt1);
    vmax0 = wasm_f32x4_pmax(vmax0, vt2);
    vmax1 = wasm_f32x4_pmax(vmax1, vt3);
  }
  // Merge the two accumulators.
  vmax0 = wasm_f32x4_pmax(vmax0, vmax1);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 1,905 | 29.253968 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-pminmax-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Uses f32x4.pmax (WebAssembly pseudo-maximum,
// compare-select; NaN/-0 handling differs from f32x4.max). 16 elements per
// main-loop iteration, 4 independent accumulators. `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_pminmax_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of all accumulators so the
  // reduction is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  v128_t vmax2 = vmax0;
  v128_t vmax3 = vmax0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    const v128_t vt3 = wasm_v128_load(input + 12);
    input += 16;
    vmax0 = wasm_f32x4_pmax(vmax0, vt0);
    vmax1 = wasm_f32x4_pmax(vmax1, vt1);
    vmax2 = wasm_f32x4_pmax(vmax2, vt2);
    vmax3 = wasm_f32x4_pmax(vmax3, vt3);
  }
  // Merge the four accumulators pairwise.
  vmax0 = wasm_f32x4_pmax(vmax0, vmax1);
  vmax2 = wasm_f32x4_pmax(vmax2, vmax3);
  vmax0 = wasm_f32x4_pmax(vmax0, vmax2);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 2,035 | 29.38806 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-pminmax-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Uses f32x4.pmax (WebAssembly pseudo-maximum,
// compare-select; NaN/-0 handling differs from f32x4.max) with a single
// accumulator, 4 elements per iteration. `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_pminmax_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane so the reduction
  // is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 1,382 | 27.22449 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmax-wasmsimd-pminmax-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the maximum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Uses f32x4.pmax (WebAssembly pseudo-maximum,
// compare-select; NaN/-0 handling differs from f32x4.max). 8 elements per
// main-loop iteration, 2 independent accumulators. `params` is unused.
void xnn_f32_rmax_ukernel__wasmsimd_pminmax_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of both accumulators so the
  // reduction is valid for arbitrary (e.g. all-negative) inputs.
  v128_t vmax0 = wasm_v128_load32_splat(input);
  v128_t vmax1 = vmax0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    input += 8;
    vmax0 = wasm_f32x4_pmax(vmax0, vt0);
    vmax1 = wasm_f32x4_pmax(vmax1, vt1);
  }
  // Merge the two accumulators.
  vmax0 = wasm_f32x4_pmax(vmax0, vmax1);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Horizontal reduction: fold the high 64 bits onto the low 64 bits.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v64x2_shuffle(vmax0, vmax0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements: the partial load zeroes lanes 2-3, which the
    // remaining reduction steps never consume.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmax0 = wasm_f32x4_pmax(vmax0, wasm_v32x4_shuffle(vmax0, vmax0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element: only lane 0 of vt matters for the final store.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmax0 = wasm_f32x4_pmax(vmax0, vt);
  }
  wasm_v128_store32_lane(output, vmax0, 0);
}
| 1,718 | 28.135593 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-neon-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. NEON kernel: 12 elements per main-loop
// iteration, 3 independent 128-bit accumulators. `params` is unused.
void xnn_f32_rmin_ukernel__neon_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of all accumulators so the
  // reduction is valid for arbitrary inputs.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmin1 = vmin0;
  float32x4_t vmin2 = vmin0;
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt0);
    vmin1 = vminq_f32(vmin1, vt1);
    vmin2 = vminq_f32(vmin2, vt2);
  }
  // Merge the three accumulators.
  vmin0 = vminq_f32(vmin0, vmin1);
  vmin0 = vminq_f32(vmin0, vmin2);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
  }
  // Fold the 128-bit accumulator into a 64-bit register (2 lanes).
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements fit exactly in one 64-bit load.
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
  }
  // Pairwise minimum folds lane 1 into lane 0.
  vmin = vpmin_f32(vmin, vmin);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element, duplicated into both lanes; lane 0 is stored.
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
  }
  vst1_lane_f32(output, vmin, 0);
}
| 1,742 | 28.542373 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-neon-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. NEON kernel: 16 elements per main-loop
// iteration folded into 2 independent accumulators. `params` is unused.
void xnn_f32_rmin_ukernel__neon_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of both accumulators so the
  // reduction is valid for arbitrary inputs.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmin1 = vmin0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    const float32x4_t vt3 = vld1q_f32(input); input += 4;
    // Alternate the two accumulators across the four vectors.
    vmin0 = vminq_f32(vmin0, vt0);
    vmin1 = vminq_f32(vmin1, vt1);
    vmin0 = vminq_f32(vmin0, vt2);
    vmin1 = vminq_f32(vmin1, vt3);
  }
  // Merge the two accumulators.
  vmin0 = vminq_f32(vmin0, vmin1);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
  }
  // Fold the 128-bit accumulator into a 64-bit register (2 lanes).
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements fit exactly in one 64-bit load.
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
  }
  // Pairwise minimum folds lane 1 into lane 0.
  vmin = vpmin_f32(vmin, vmin);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element, duplicated into both lanes; lane 0 is stored.
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
  }
  vst1_lane_f32(output, vmin, 0);
}
| 1,771 | 29.033898 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-neon-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. NEON kernel: 16 elements per main-loop
// iteration, 4 independent accumulators. `params` is unused.
void xnn_f32_rmin_ukernel__neon_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of all accumulators so the
  // reduction is valid for arbitrary inputs.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmin1 = vmin0;
  float32x4_t vmin2 = vmin0;
  float32x4_t vmin3 = vmin0;
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    const float32x4_t vt3 = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt0);
    vmin1 = vminq_f32(vmin1, vt1);
    vmin2 = vminq_f32(vmin2, vt2);
    vmin3 = vminq_f32(vmin3, vt3);
  }
  // Merge the four accumulators pairwise.
  vmin0 = vminq_f32(vmin0, vmin1);
  vmin2 = vminq_f32(vmin2, vmin3);
  vmin0 = vminq_f32(vmin0, vmin2);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
  }
  // Fold the 128-bit accumulator into a 64-bit register (2 lanes).
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements fit exactly in one 64-bit load.
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
  }
  // Pairwise minimum folds lane 1 into lane 0.
  vmin = vpmin_f32(vmin, vmin);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element, duplicated into both lanes; lane 0 is stored.
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
  }
  vst1_lane_f32(output, vmin, 0);
}
| 1,899 | 29.15873 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. NEON kernel with a single accumulator,
// 4 elements per iteration. `params` is unused.
void xnn_f32_rmin_ukernel__neon_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane so the reduction
  // is valid for arbitrary inputs.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
  }
  // Fold the 128-bit accumulator into a 64-bit register (2 lanes).
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements fit exactly in one 64-bit load.
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
  }
  // Pairwise minimum folds lane 1 into lane 0.
  vmin = vpmin_f32(vmin, vmin);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element, duplicated into both lanes; lane 0 is stored.
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
  }
  vst1_lane_f32(output, vmin, 0);
}
| 1,255 | 26.304348 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-neon-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. NEON kernel: 8 elements per main-loop
// iteration, 2 independent accumulators. `params` is unused.
void xnn_f32_rmin_ukernel__neon_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every lane of both accumulators so the
  // reduction is valid for arbitrary inputs.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmin1 = vmin0;
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt0);
    vmin1 = vminq_f32(vmin1, vt1);
  }
  // Merge the two accumulators.
  vmin0 = vminq_f32(vmin0, vmin1);
  // Consume remaining full 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
  }
  // Fold the 128-bit accumulator into a 64-bit register (2 lanes).
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two trailing elements fit exactly in one 64-bit load.
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
  }
  // Pairwise minimum folds lane 1 into lane 0.
  vmin = vpmin_f32(vmin, vmin);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One trailing element, duplicated into both lanes; lane 0 is stored.
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
  }
  vst1_lane_f32(output, vmin, 0);
}
| 1,582 | 27.781818 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Plain scalar kernel, one element per
// iteration. `params` is unused.
void xnn_f32_rmin_ukernel__scalar_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed the running minimum with the first element; the loop re-reads it,
  // which is harmless because min is idempotent.
  float result = input[0];
  while (batch != 0) {
    result = math_min_f32(result, *input);
    input += 1;
    batch -= sizeof(float);
  }
  *output = result;
}
| 816 | 21.694444 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-scalar-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Scalar kernel: 2 elements per main-loop
// iteration with 2 independent accumulators to break the dependency chain.
// `params` is unused.
void xnn_f32_rmin_ukernel__scalar_x2_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Both accumulators start from the first element so the reduction is
  // correct for any input values (min is idempotent).
  float acc_even = input[0];
  float acc_odd = acc_even;
  while (batch >= 2 * sizeof(float)) {
    acc_even = math_min_f32(acc_even, input[0]);
    acc_odd = math_min_f32(acc_odd, input[1]);
    input += 2;
    batch -= 2 * sizeof(float);
  }
  float result = math_min_f32(acc_even, acc_odd);
  // At most one trailing element can remain.
  if XNN_UNLIKELY(batch != 0) {
    result = math_min_f32(result, *input);
  }
  output[0] = result;
}
| 1,086 | 22.630435 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-scalar-x3-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Scalar kernel: 3 elements per main-loop
// iteration with 3 independent accumulators. `params` is unused.
void xnn_f32_rmin_ukernel__scalar_x3_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // All accumulators start from the first element so the reduction is
  // correct for any input values (min is idempotent).
  float acc0 = input[0];
  float acc1 = acc0;
  float acc2 = acc0;
  while (batch >= 3 * sizeof(float)) {
    acc0 = math_min_f32(acc0, input[0]);
    acc1 = math_min_f32(acc1, input[1]);
    acc2 = math_min_f32(acc2, input[2]);
    input += 3;
    batch -= 3 * sizeof(float);
  }
  // Merge the accumulators, then fold in up to two trailing elements.
  float result = math_min_f32(math_min_f32(acc0, acc1), acc2);
  for (; batch != 0; batch -= sizeof(float)) {
    result = math_min_f32(result, *input++);
  }
  output[0] = result;
}
| 1,288 | 23.320755 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-scalar-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Scalar kernel: 4 elements per main-loop
// iteration alternated across 2 independent accumulators. `params` is unused.
void xnn_f32_rmin_ukernel__scalar_x4_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Both accumulators start from the first element so the reduction is
  // correct for any input values (min is idempotent).
  float acc_even = input[0];
  float acc_odd = acc_even;
  while (batch >= 4 * sizeof(float)) {
    acc_even = math_min_f32(acc_even, input[0]);
    acc_odd = math_min_f32(acc_odd, input[1]);
    acc_even = math_min_f32(acc_even, input[2]);
    acc_odd = math_min_f32(acc_odd, input[3]);
    input += 4;
    batch -= 4 * sizeof(float);
  }
  // Merge the accumulators, then fold in up to three trailing elements.
  float result = math_min_f32(acc_even, acc_odd);
  for (; batch != 0; batch -= sizeof(float)) {
    result = math_min_f32(result, *input++);
  }
  output[0] = result;
}
| 1,297 | 23.490566 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-scalar-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Computes the minimum of batch/sizeof(float) elements of `input` and stores
// the scalar result to output[0]. Scalar kernel: 4 elements per main-loop
// iteration with 4 independent accumulators. `params` is unused.
void xnn_f32_rmin_ukernel__scalar_x4_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // All accumulators start from the first element so the reduction is
  // correct for any input values (min is idempotent).
  float acc0 = input[0];
  float acc1 = acc0;
  float acc2 = acc0;
  float acc3 = acc0;
  while (batch >= 4 * sizeof(float)) {
    acc0 = math_min_f32(acc0, input[0]);
    acc1 = math_min_f32(acc1, input[1]);
    acc2 = math_min_f32(acc2, input[2]);
    acc3 = math_min_f32(acc3, input[3]);
    input += 4;
    batch -= 4 * sizeof(float);
  }
  // Merge the accumulators pairwise, then fold in up to three trailing
  // elements.
  float result = math_min_f32(math_min_f32(acc0, acc1), math_min_f32(acc2, acc3));
  for (; batch != 0; batch -= sizeof(float)) {
    result = math_min_f32(result, *input++);
  }
  output[0] = result;
}
| 1,419 | 23.912281 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-sse-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using SSE, 12 elements per
// main-loop iteration across 3 vector accumulators.
// Generated kernel: _mm_min_ps/_mm_min_ss return the second operand when
// either input is NaN, so operand order is preserved exactly.
void xnn_f32_rmin_ukernel__sse_x12_acc3(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all 4 lanes of every accumulator.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmin1 = vmin0;
  __m128 vmin2 = vmin0;
  // Main loop: 3 unaligned 4-float loads per iteration, one per accumulator.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    input += 12;
    vmin0 = _mm_min_ps(vmin0, vt0);
    vmin1 = _mm_min_ps(vmin1, vt1);
    vmin2 = _mm_min_ps(vmin2, vt2);
  }
  // Fold the 3 accumulators into vmin0.
  vmin0 = _mm_min_ps(vmin0, vmin1);
  vmin0 = _mm_min_ps(vmin0, vmin2);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
  }
  // Scalar remainder: fewer than 4 floats left; only lane 0 is updated.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction: fold the upper half onto the lower half, then
  // lane 1 onto lane 0, and store the scalar result.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
}
| 1,742 | 26.666667 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-sse-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using SSE, 16 elements per
// main-loop iteration with 2 vector accumulators (each accumulator absorbs
// two of the four loads per iteration).
// Generated kernel: _mm_min_ps/_mm_min_ss NaN handling is operand-order
// dependent, so the emitted order is preserved exactly.
void xnn_f32_rmin_ukernel__sse_x16_acc2(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of both accumulators.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmin1 = vmin0;
  // Main loop: 4 unaligned 4-float loads, alternating between accumulators.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    const __m128 vt3 = _mm_loadu_ps(input + 12);
    input += 16;
    vmin0 = _mm_min_ps(vmin0, vt0);
    vmin1 = _mm_min_ps(vmin1, vt1);
    vmin0 = _mm_min_ps(vmin0, vt2);
    vmin1 = _mm_min_ps(vmin1, vt3);
  }
  // Fold the 2 accumulators into vmin0.
  vmin0 = _mm_min_ps(vmin0, vmin1);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
  }
  // Scalar remainder: fewer than 4 floats left; only lane 0 is updated.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction to lane 0, then store the scalar result.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
}
| 1,767 | 27.063492 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-sse-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using SSE, 16 elements per
// main-loop iteration across 4 independent vector accumulators.
// Generated kernel: _mm_min_ps/_mm_min_ss NaN handling is operand-order
// dependent, so the emitted order is preserved exactly.
void xnn_f32_rmin_ukernel__sse_x16_acc4(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of every accumulator.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmin1 = vmin0;
  __m128 vmin2 = vmin0;
  __m128 vmin3 = vmin0;
  // Main loop: 4 unaligned 4-float loads per iteration, one per accumulator.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    const __m128 vt3 = _mm_loadu_ps(input + 12);
    input += 16;
    vmin0 = _mm_min_ps(vmin0, vt0);
    vmin1 = _mm_min_ps(vmin1, vt1);
    vmin2 = _mm_min_ps(vmin2, vt2);
    vmin3 = _mm_min_ps(vmin3, vt3);
  }
  // Fold the 4 accumulators pairwise into vmin0.
  vmin0 = _mm_min_ps(vmin0, vmin1);
  vmin2 = _mm_min_ps(vmin2, vmin3);
  vmin0 = _mm_min_ps(vmin0, vmin2);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
  }
  // Scalar remainder: fewer than 4 floats left; only lane 0 is updated.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction to lane 0, then store the scalar result.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
}
| 1,887 | 27.179104 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using SSE with a single
// vector accumulator, 4 elements per iteration.
// Generated kernel: _mm_min_ps/_mm_min_ss NaN handling is operand-order
// dependent, so the emitted order is preserved exactly.
void xnn_f32_rmin_ukernel__sse_x4(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all 4 lanes of the accumulator.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  // Main loop: one unaligned 4-float load per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
  }
  // Scalar remainder: fewer than 4 floats left; only lane 0 is updated.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction: fold the upper half onto the lower half, then
  // lane 1 onto lane 0, and store the scalar result.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
}
| 1,277 | 25.081633 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-sse-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using SSE, 8 elements per
// main-loop iteration across 2 independent vector accumulators.
// Generated kernel: _mm_min_ps/_mm_min_ss NaN handling is operand-order
// dependent, so the emitted order is preserved exactly.
void xnn_f32_rmin_ukernel__sse_x8_acc2(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of both accumulators.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmin1 = vmin0;
  // Main loop: 2 unaligned 4-float loads per iteration, one per accumulator.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    input += 8;
    vmin0 = _mm_min_ps(vmin0, vt0);
    vmin1 = _mm_min_ps(vmin1, vt1);
  }
  // Fold the 2 accumulators into vmin0.
  vmin0 = _mm_min_ps(vmin0, vmin1);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
  }
  // Scalar remainder: fewer than 4 floats left; only lane 0 is updated.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction to lane 0, then store the scalar result.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
}
| 1,594 | 26.033898 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum, one element per iteration,
// using the clang WebAssembly f32.min builtin.
// Generated kernel: kept byte-identical; operand order of the min is
// preserved as emitted by the generator.
void xnn_f32_rmin_ukernel__wasm_x1(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed with the first element; note the loop then re-reads it, which is
  // harmless since min(x, x) == x.
  float vmin0 = *input;
  do {
    const float vt = *input++;
    vmin0 = __builtin_wasm_min_f32(vmin0, vt);
    batch -= sizeof(float);
  } while (batch != 0);
  output[0] = vmin0;
}
| 798 | 21.828571 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasm-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum, 2 elements per iteration
// across 2 scalar accumulators, using the clang WebAssembly f32.min builtin.
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasm_x2_acc2(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed both accumulators with the first element.
  float vmin0 = *input;
  float vmin1 = vmin0;
  // Main loop: 2 floats per iteration, one per accumulator.
  for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
    const float vt0 = input[0];
    const float vt1 = input[1];
    input += 2;
    vmin0 = __builtin_wasm_min_f32(vmin0, vt0);
    vmin1 = __builtin_wasm_min_f32(vmin1, vt1);
  }
  // Fold the 2 accumulators.
  vmin0 = __builtin_wasm_min_f32(vmin0, vmin1);
  // Remainder: at most one float can be left (batch is a multiple of
  // sizeof(float)), so a single conditional suffices — no loop needed.
  if XNN_UNLIKELY(batch != 0) {
    const float vt = *input;
    vmin0 = __builtin_wasm_min_f32(vmin0, vt);
  }
  output[0] = vmin0;
}
| 1,098 | 23.422222 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasm-x3-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum, 3 elements per iteration
// across 3 scalar accumulators, using the clang WebAssembly f32.min builtin.
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasm_x3_acc3(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed every accumulator with the first element.
  float vmin0 = *input;
  float vmin1 = vmin0;
  float vmin2 = vmin0;
  // Main loop: 3 floats per iteration, one per accumulator.
  for (; batch >= 3 * sizeof(float); batch -= 3 * sizeof(float)) {
    const float vt0 = input[0];
    const float vt1 = input[1];
    const float vt2 = input[2];
    input += 3;
    vmin0 = __builtin_wasm_min_f32(vmin0, vt0);
    vmin1 = __builtin_wasm_min_f32(vmin1, vt1);
    vmin2 = __builtin_wasm_min_f32(vmin2, vt2);
  }
  // Fold the 3 accumulators into vmin0.
  vmin0 = __builtin_wasm_min_f32(vmin0, vmin1);
  vmin0 = __builtin_wasm_min_f32(vmin0, vmin2);
  // Scalar remainder loop: up to 2 floats left.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vt = *input++;
      vmin0 = __builtin_wasm_min_f32(vmin0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  output[0] = vmin0;
}
| 1,320 | 24.403846 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasm-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum, 4 elements per iteration
// with 2 scalar accumulators (each absorbing two elements per iteration),
// using the clang WebAssembly f32.min builtin.
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasm_x4_acc2(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed both accumulators with the first element.
  float vmin0 = *input;
  float vmin1 = vmin0;
  // Main loop: 4 floats per iteration, alternating between accumulators.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float vt0 = input[0];
    const float vt1 = input[1];
    const float vt2 = input[2];
    const float vt3 = input[3];
    input += 4;
    vmin0 = __builtin_wasm_min_f32(vmin0, vt0);
    vmin1 = __builtin_wasm_min_f32(vmin1, vt1);
    vmin0 = __builtin_wasm_min_f32(vmin0, vt2);
    vmin1 = __builtin_wasm_min_f32(vmin1, vt3);
  }
  // Fold the 2 accumulators.
  vmin0 = __builtin_wasm_min_f32(vmin0, vmin1);
  // Scalar remainder loop: up to 3 floats left.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vt = *input++;
      vmin0 = __builtin_wasm_min_f32(vmin0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  output[0] = vmin0;
}
| 1,329 | 24.576923 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasm-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum, 4 elements per iteration
// across 4 independent scalar accumulators, using the clang WebAssembly
// f32.min builtin.
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasm_x4_acc4(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed every accumulator with the first element.
  float vmin0 = *input;
  float vmin1 = vmin0;
  float vmin2 = vmin0;
  float vmin3 = vmin0;
  // Main loop: 4 floats per iteration, one per accumulator.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float vt0 = input[0];
    const float vt1 = input[1];
    const float vt2 = input[2];
    const float vt3 = input[3];
    input += 4;
    vmin0 = __builtin_wasm_min_f32(vmin0, vt0);
    vmin1 = __builtin_wasm_min_f32(vmin1, vt1);
    vmin2 = __builtin_wasm_min_f32(vmin2, vt2);
    vmin3 = __builtin_wasm_min_f32(vmin3, vt3);
  }
  // Fold the 4 accumulators pairwise into vmin0.
  vmin0 = __builtin_wasm_min_f32(vmin0, vmin1);
  vmin2 = __builtin_wasm_min_f32(vmin2, vmin3);
  vmin0 = __builtin_wasm_min_f32(vmin0, vmin2);
  // Scalar remainder loop: up to 3 floats left.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vt = *input++;
      vmin0 = __builtin_wasm_min_f32(vmin0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  output[0] = vmin0;
}
| 1,471 | 25.285714 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-minmax-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.min,
// 12 elements per main-loop iteration across 3 vector accumulators.
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasmsimd_minmax_x12_acc3(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of every accumulator.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  v128_t vmin1 = vmin0;
  v128_t vmin2 = vmin0;
  // Main loop: 3 4-float loads per iteration, one per accumulator.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    input += 12;
    vmin0 = wasm_f32x4_min(vmin0, vt0);
    vmin1 = wasm_f32x4_min(vmin1, vt1);
    vmin2 = wasm_f32x4_min(vmin2, vt2);
  }
  // Fold the 3 accumulators into vmin0.
  vmin0 = wasm_f32x4_min(vmin0, vmin1);
  vmin0 = wasm_f32x4_min(vmin0, vmin2);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: the zero-filled upper lanes of the padded load
    // only touch lanes 2-3, which are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 1,867 | 28.650794 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-minmax-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.min,
// 16 elements per main-loop iteration with 2 vector accumulators (each
// absorbing two of the four loads per iteration).
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasmsimd_minmax_x16_acc2(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of both accumulators.
  vmin0 = vmin0;
  v128_t vmin0 = wasm_v128_load32_splat(input);
  v128_t vmin1 = vmin0;
  // Main loop: 4 4-float loads per iteration, alternating accumulators.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    const v128_t vt3 = wasm_v128_load(input + 12);
    input += 16;
    vmin0 = wasm_f32x4_min(vmin0, vt0);
    vmin1 = wasm_f32x4_min(vmin1, vt1);
    vmin0 = wasm_f32x4_min(vmin0, vt2);
    vmin1 = wasm_f32x4_min(vmin1, vt3);
  }
  // Fold the 2 accumulators into vmin0.
  vmin0 = wasm_f32x4_min(vmin0, vmin1);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 1,894 | 29.079365 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-minmax-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.min,
// 16 elements per main-loop iteration across 4 vector accumulators.
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasmsimd_minmax_x16_acc4(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of every accumulator.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  v128_t vmin1 = vmin0;
  v128_t vmin2 = vmin0;
  v128_t vmin3 = vmin0;
  // Main loop: 4 4-float loads per iteration, one per accumulator.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    const v128_t vt3 = wasm_v128_load(input + 12);
    input += 16;
    vmin0 = wasm_f32x4_min(vmin0, vt0);
    vmin1 = wasm_f32x4_min(vmin1, vt1);
    vmin2 = wasm_f32x4_min(vmin2, vt2);
    vmin3 = wasm_f32x4_min(vmin3, vt3);
  }
  // Fold the 4 accumulators pairwise into vmin0.
  vmin0 = wasm_f32x4_min(vmin0, vmin1);
  vmin2 = wasm_f32x4_min(vmin2, vmin3);
  vmin0 = wasm_f32x4_min(vmin0, vmin2);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 2,022 | 29.19403 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-minmax-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.min
// with a single vector accumulator, 4 elements per iteration.
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasmsimd_minmax_x4(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all 4 lanes of the accumulator.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  // Main loop: one 4-float load per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 1,376 | 27.102041 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-minmax-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.min,
// 8 elements per main-loop iteration across 2 vector accumulators.
// Generated kernel: operand order of the mins is preserved as emitted.
void xnn_f32_rmin_ukernel__wasmsimd_minmax_x8_acc2(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of both accumulators.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  v128_t vmin1 = vmin0;
  // Main loop: 2 4-float loads per iteration, one per accumulator.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    input += 8;
    vmin0 = wasm_f32x4_min(vmin0, vt0);
    vmin1 = wasm_f32x4_min(vmin1, vt1);
  }
  // Fold the 2 accumulators into vmin0.
  vmin0 = wasm_f32x4_min(vmin0, vmin1);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_min(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_min(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 1,709 | 27.983051 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-pminmax-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.pmin
// (pseudo-min: lanewise b < a ? b : a per the wasm SIMD spec, so NaN
// handling differs from f32x4.min and operand order is significant),
// 12 elements per main-loop iteration across 3 vector accumulators.
void xnn_f32_rmin_ukernel__wasmsimd_pminmax_x12_acc3(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of every accumulator.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  v128_t vmin1 = vmin0;
  v128_t vmin2 = vmin0;
  // Main loop: 3 4-float loads per iteration, one per accumulator.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    input += 12;
    vmin0 = wasm_f32x4_pmin(vmin0, vt0);
    vmin1 = wasm_f32x4_pmin(vmin1, vt1);
    vmin2 = wasm_f32x4_pmin(vmin2, vt2);
  }
  // Fold the 3 accumulators into vmin0.
  vmin0 = wasm_f32x4_pmin(vmin0, vmin1);
  vmin0 = wasm_f32x4_pmin(vmin0, vmin2);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 1,878 | 28.825397 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-pminmax-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.pmin
// (pseudo-min: lanewise b < a ? b : a per the wasm SIMD spec; operand
// order is significant for NaN inputs), 16 elements per main-loop
// iteration with 2 vector accumulators.
void xnn_f32_rmin_ukernel__wasmsimd_pminmax_x16_acc2(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of both accumulators.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  v128_t vmin1 = vmin0;
  // Main loop: 4 4-float loads per iteration, alternating accumulators.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    const v128_t vt3 = wasm_v128_load(input + 12);
    input += 16;
    vmin0 = wasm_f32x4_pmin(vmin0, vt0);
    vmin1 = wasm_f32x4_pmin(vmin1, vt1);
    vmin0 = wasm_f32x4_pmin(vmin0, vt2);
    vmin1 = wasm_f32x4_pmin(vmin1, vt3);
  }
  // Fold the 2 accumulators into vmin0.
  vmin0 = wasm_f32x4_pmin(vmin0, vmin1);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 1,905 | 29.253968 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-pminmax-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.pmin
// (pseudo-min: lanewise b < a ? b : a per the wasm SIMD spec; operand
// order is significant for NaN inputs), 16 elements per main-loop
// iteration across 4 independent vector accumulators.
void xnn_f32_rmin_ukernel__wasmsimd_pminmax_x16_acc4(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of every accumulator.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  v128_t vmin1 = vmin0;
  v128_t vmin2 = vmin0;
  v128_t vmin3 = vmin0;
  // Main loop: 4 4-float loads per iteration, one per accumulator.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    const v128_t vt2 = wasm_v128_load(input + 8);
    const v128_t vt3 = wasm_v128_load(input + 12);
    input += 16;
    vmin0 = wasm_f32x4_pmin(vmin0, vt0);
    vmin1 = wasm_f32x4_pmin(vmin1, vt1);
    vmin2 = wasm_f32x4_pmin(vmin2, vt2);
    vmin3 = wasm_f32x4_pmin(vmin3, vt3);
  }
  // Fold the 4 accumulators pairwise into vmin0.
  vmin0 = wasm_f32x4_pmin(vmin0, vmin1);
  vmin2 = wasm_f32x4_pmin(vmin2, vmin3);
  vmin0 = wasm_f32x4_pmin(vmin0, vmin2);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 2,035 | 29.38806 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-pminmax-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.pmin
// (pseudo-min: lanewise b < a ? b : a per the wasm SIMD spec; operand
// order is significant for NaN inputs) with a single vector accumulator.
void xnn_f32_rmin_ukernel__wasmsimd_pminmax_x4(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all 4 lanes of the accumulator.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  // Main loop: one 4-float load per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 1,382 | 27.22449 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rmin-wasmsimd-pminmax-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Reduces a batch of floats to their minimum using WAsm SIMD f32x4.pmin
// (pseudo-min: lanewise b < a ? b : a per the wasm SIMD spec; operand
// order is significant for NaN inputs), 8 elements per main-loop
// iteration across 2 independent vector accumulators.
void xnn_f32_rmin_ukernel__wasmsimd_pminmax_x8_acc2(
    size_t batch,      // number of input BYTES; non-zero multiple of sizeof(float)
    const float* input,
    float* output,     // receives the single minimum value
    const union xnn_f32_default_params* params)  // unused by this kernel
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into all lanes of both accumulators.
  v128_t vmin0 = wasm_v128_load32_splat(input);
  v128_t vmin1 = vmin0;
  // Main loop: 2 4-float loads per iteration, one per accumulator.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const v128_t vt0 = wasm_v128_load(input);
    const v128_t vt1 = wasm_v128_load(input + 4);
    input += 8;
    vmin0 = wasm_f32x4_pmin(vmin0, vt0);
    vmin1 = wasm_f32x4_pmin(vmin1, vt1);
  }
  // Fold the 2 accumulators into vmin0.
  vmin0 = wasm_f32x4_pmin(vmin0, vmin1);
  // Vector remainder: one 4-float vector at a time.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const v128_t vt = wasm_v128_load(input);
    input += 4;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold the upper 64 bits onto the lower 64 bits; only lanes 0-1 matter
  // from here on.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v64x2_shuffle(vmin0, vmin0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // 2-element remainder: zero-filled lanes 2-3 are never consumed below.
    const v128_t vt = wasm_v128_load64_zero(input);
    input += 2;
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  // Fold lane 1 onto lane 0.
  vmin0 = wasm_f32x4_pmin(vmin0, wasm_v32x4_shuffle(vmin0, vmin0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // 1-element remainder: only lane 0 of the padded load is consumed.
    const v128_t vt = wasm_v128_load32_zero(input);
    vmin0 = wasm_f32x4_pmin(vmin0, vt);
  }
  wasm_v128_store32_lane(output, vmin0, 0);
}
| 1,718 | 28.135593 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-neon-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with NEON, 12 floats per
// main-loop iteration spread over 3 accumulator pairs. batch is in BYTES
// and must be a nonzero multiple of sizeof(float).
// Writes output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__neon_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed all accumulators with the first element broadcast to every lane.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmax0 = vmin0;
  float32x4_t vmin1 = vmin0;
  float32x4_t vmax1 = vmax0;
  float32x4_t vmin2 = vmin0;
  float32x4_t vmax2 = vmax0;
  // Main loop: three independent 4-lane vectors per iteration.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt0);
    vmax0 = vmaxq_f32(vmax0, vt0);
    vmin1 = vminq_f32(vmin1, vt1);
    vmax1 = vmaxq_f32(vmax1, vt1);
    vmin2 = vminq_f32(vmin2, vt2);
    vmax2 = vmaxq_f32(vmax2, vt2);
  }
  // Fold the three accumulator pairs into accumulator 0.
  vmin0 = vminq_f32(vmin0, vmin1);
  vmax0 = vmaxq_f32(vmax0, vmax1);
  vmin0 = vminq_f32(vmin0, vmin2);
  vmax0 = vmaxq_f32(vmax0, vmax2);
  // Drain remaining whole 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Reduce 4 lanes -> 2 lanes, then handle the 2-element remainder.
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  // Pairwise reduce 2 lanes -> 1, then handle the 1-element remainder.
  vmin = vpmin_f32(vmin, vmin);
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmin, 0);
  vst1_lane_f32(output + 1, vmax, 0);
}
| 2,247 | 29.794521 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-neon-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with NEON, 16 floats per
// main-loop iteration folded alternately into 2 accumulator pairs.
// batch is in BYTES; output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__neon_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed accumulators with the first element broadcast to every lane.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmax0 = vmin0;
  float32x4_t vmin1 = vmin0;
  float32x4_t vmax1 = vmax0;
  // Main loop: four vectors per iteration, alternating between the two
  // accumulator pairs (vt0/vt2 -> pair 0, vt1/vt3 -> pair 1).
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    const float32x4_t vt3 = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt0);
    vmax0 = vmaxq_f32(vmax0, vt0);
    vmin1 = vminq_f32(vmin1, vt1);
    vmax1 = vmaxq_f32(vmax1, vt1);
    vmin0 = vminq_f32(vmin0, vt2);
    vmax0 = vmaxq_f32(vmax0, vt2);
    vmin1 = vminq_f32(vmin1, vt3);
    vmax1 = vmaxq_f32(vmax1, vt3);
  }
  // Merge the accumulator pairs, then drain remaining whole vectors.
  vmin0 = vminq_f32(vmin0, vmin1);
  vmax0 = vmaxq_f32(vmax0, vmax1);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction 4 -> 2 -> 1 lanes, interleaved with remainders.
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vmin = vpmin_f32(vmin, vmin);
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmin, 0);
  vst1_lane_f32(output + 1, vmax, 0);
}
| 2,247 | 30.222222 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-neon-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with NEON, 16 floats per
// main-loop iteration with 4 independent accumulator pairs.
// batch is in BYTES; output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__neon_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed all four accumulator pairs with the broadcast first element.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmax0 = vmin0;
  float32x4_t vmin1 = vmin0;
  float32x4_t vmax1 = vmax0;
  float32x4_t vmin2 = vmin0;
  float32x4_t vmax2 = vmax0;
  float32x4_t vmin3 = vmin0;
  float32x4_t vmax3 = vmax0;
  // Main loop: one vector per accumulator pair, no cross dependencies.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    const float32x4_t vt2 = vld1q_f32(input); input += 4;
    const float32x4_t vt3 = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt0);
    vmax0 = vmaxq_f32(vmax0, vt0);
    vmin1 = vminq_f32(vmin1, vt1);
    vmax1 = vmaxq_f32(vmax1, vt1);
    vmin2 = vminq_f32(vmin2, vt2);
    vmax2 = vmaxq_f32(vmax2, vt2);
    vmin3 = vminq_f32(vmin3, vt3);
    vmax3 = vmaxq_f32(vmax3, vt3);
  }
  // Tree-reduce the four accumulator pairs: (0,1) and (2,3), then (0,2).
  vmin0 = vminq_f32(vmin0, vmin1);
  vmax0 = vmaxq_f32(vmax0, vmax1);
  vmin2 = vminq_f32(vmin2, vmin3);
  vmax2 = vmaxq_f32(vmax2, vmax3);
  vmin0 = vminq_f32(vmin0, vmin2);
  vmax0 = vmaxq_f32(vmax0, vmax2);
  // Drain remaining whole 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction 4 -> 2 -> 1 lanes, interleaved with remainders.
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vmin = vpmin_f32(vmin, vmin);
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmin, 0);
  vst1_lane_f32(output + 1, vmax, 0);
}
| 2,503 | 30.3 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-neon-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with NEON, 4 floats per
// iteration with a single accumulator pair. batch is in BYTES;
// output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__neon_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed accumulators with the first element broadcast to every lane.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmax0 = vmin0;
  // Main loop: one 4-lane vector per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction 4 -> 2 -> 1 lanes, interleaved with the
  // 2-element and 1-element remainders.
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vmin = vpmin_f32(vmin, vmin);
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmin, 0);
  vst1_lane_f32(output + 1, vmax, 0);
}
| 1,527 | 27.830189 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-neon-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with NEON, 8 floats per
// main-loop iteration with 2 independent accumulator pairs.
// batch is in BYTES; output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__neon_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed accumulators with the first element broadcast to every lane.
  float32x4_t vmin0 = vld1q_dup_f32(input);
  float32x4_t vmax0 = vmin0;
  float32x4_t vmin1 = vmin0;
  float32x4_t vmax1 = vmax0;
  // Main loop: two independent 4-lane vectors per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const float32x4_t vt0 = vld1q_f32(input); input += 4;
    const float32x4_t vt1 = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt0);
    vmax0 = vmaxq_f32(vmax0, vt0);
    vmin1 = vminq_f32(vmin1, vt1);
    vmax1 = vmaxq_f32(vmax1, vt1);
  }
  // Merge the accumulator pairs, then drain remaining whole vectors.
  vmin0 = vminq_f32(vmin0, vmin1);
  vmax0 = vmaxq_f32(vmax0, vmax1);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const float32x4_t vt = vld1q_f32(input); input += 4;
    vmin0 = vminq_f32(vmin0, vt);
    vmax0 = vmaxq_f32(vmax0, vt);
  }
  // Horizontal reduction 4 -> 2 -> 1 lanes, interleaved with remainders.
  float32x2_t vmin = vmin_f32(vget_low_f32(vmin0), vget_high_f32(vmin0));
  float32x2_t vmax = vmax_f32(vget_low_f32(vmax0), vget_high_f32(vmax0));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    const float32x2_t vt = vld1_f32(input); input += 2;
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vmin = vpmin_f32(vmin, vmin);
  vmax = vpmax_f32(vmax, vmax);
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    const float32x2_t vt = vld1_dup_f32(input);
    vmin = vmin_f32(vmin, vt);
    vmax = vmax_f32(vmax, vt);
  }
  vst1_lane_f32(output, vmin, 0);
  vst1_lane_f32(output + 1, vmax, 0);
}
| 1,988 | 29.136364 | 73 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar min/max reduction over a batch of floats, one element at a time.
// batch is in BYTES and must be a nonzero multiple of sizeof(float).
// Writes output[0] = min, output[1] = max. params is unused.
void xnn_f32_rminmax_ukernel__scalar_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed both running extrema with the first element.
  float running_min = input[0];
  float running_max = input[0];
  // batch != 0 is asserted, so the loop always executes at least once.
  while (batch != 0) {
    const float value = *input++;
    running_min = math_min_f32(running_min, value);
    running_max = math_max_f32(running_max, value);
    batch -= sizeof(float);
  }
  output[0] = running_min;
  output[1] = running_max;
}
| 901 | 22.128205 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-scalar-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar min/max reduction over a batch of floats, unrolled by 2 with two
// independent accumulator pairs. batch is in BYTES and must be a nonzero
// multiple of sizeof(float). Writes output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__scalar_x2_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed both accumulator pairs with the first element.
  float min_a = input[0];
  float max_a = input[0];
  float min_b = min_a;
  float max_b = max_a;
  // Unrolled loop: even elements feed pair A, odd elements feed pair B.
  while (batch >= 2 * sizeof(float)) {
    const float x0 = input[0];
    const float x1 = input[1];
    input += 2;
    min_a = math_min_f32(min_a, x0);
    max_a = math_max_f32(max_a, x0);
    min_b = math_min_f32(min_b, x1);
    max_b = math_max_f32(max_b, x1);
    batch -= 2 * sizeof(float);
  }
  // Merge the two accumulator pairs.
  min_a = math_min_f32(min_a, min_b);
  max_a = math_max_f32(max_a, max_b);
  // At most one element can remain.
  if XNN_UNLIKELY(batch != 0) {
    const float x = *input;
    min_a = math_min_f32(min_a, x);
    max_a = math_max_f32(max_a, x);
  }
  output[0] = min_a;
  output[1] = max_a;
}
| 1,308 | 23.698113 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-scalar-x3-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar min/max reduction over a batch of floats, unrolled by 3 with three
// independent accumulator pairs. batch is in BYTES and must be a nonzero
// multiple of sizeof(float). Writes output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__scalar_x3_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed all three accumulator pairs with the first element.
  float min_a = input[0];
  float max_a = input[0];
  float min_b = min_a;
  float max_b = max_a;
  float min_c = min_a;
  float max_c = max_a;
  // Unrolled loop: elements rotate through pairs A, B, C.
  while (batch >= 3 * sizeof(float)) {
    const float x0 = input[0];
    const float x1 = input[1];
    const float x2 = input[2];
    input += 3;
    min_a = math_min_f32(min_a, x0);
    max_a = math_max_f32(max_a, x0);
    min_b = math_min_f32(min_b, x1);
    max_b = math_max_f32(max_b, x1);
    min_c = math_min_f32(min_c, x2);
    max_c = math_max_f32(max_c, x2);
    batch -= 3 * sizeof(float);
  }
  // Merge the three accumulator pairs into pair A.
  min_a = math_min_f32(min_a, min_b);
  max_a = math_max_f32(max_a, max_b);
  min_a = math_min_f32(min_a, min_c);
  max_a = math_max_f32(max_a, max_c);
  // Up to two elements can remain.
  if XNN_UNLIKELY(batch != 0) {
    while (batch != 0) {
      const float x = *input++;
      min_a = math_min_f32(min_a, x);
      max_a = math_max_f32(max_a, x);
      batch -= sizeof(float);
    }
  }
  output[0] = min_a;
  output[1] = max_a;
}
| 1,611 | 24.587302 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-scalar-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar min/max reduction over a batch of floats, unrolled by 4 with two
// accumulator pairs (elements alternate A, B, A, B). batch is in BYTES and
// must be a nonzero multiple of sizeof(float).
// Writes output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__scalar_x4_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed both accumulator pairs with the first element.
  float min_a = input[0];
  float max_a = input[0];
  float min_b = min_a;
  float max_b = max_a;
  // Unrolled loop: x0/x2 feed pair A, x1/x3 feed pair B.
  while (batch >= 4 * sizeof(float)) {
    const float x0 = input[0];
    const float x1 = input[1];
    const float x2 = input[2];
    const float x3 = input[3];
    input += 4;
    min_a = math_min_f32(min_a, x0);
    max_a = math_max_f32(max_a, x0);
    min_b = math_min_f32(min_b, x1);
    max_b = math_max_f32(max_b, x1);
    min_a = math_min_f32(min_a, x2);
    max_a = math_max_f32(max_a, x2);
    min_b = math_min_f32(min_b, x3);
    max_b = math_max_f32(max_b, x3);
    batch -= 4 * sizeof(float);
  }
  // Merge the two accumulator pairs.
  min_a = math_min_f32(min_a, min_b);
  max_a = math_max_f32(max_a, max_b);
  // Up to three elements can remain.
  if XNN_UNLIKELY(batch != 0) {
    while (batch != 0) {
      const float x = *input++;
      min_a = math_min_f32(min_a, x);
      max_a = math_max_f32(max_a, x);
      batch -= sizeof(float);
    }
  }
  output[0] = min_a;
  output[1] = max_a;
}
| 1,597 | 24.774194 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-scalar-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/reduce.h>
// Scalar min/max reduction over a batch of floats, unrolled by 4 with four
// independent accumulator pairs. batch is in BYTES and must be a nonzero
// multiple of sizeof(float). Writes output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__scalar_x4_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed all four accumulator pairs with the first element.
  float min_a = input[0];
  float max_a = input[0];
  float min_b = min_a;
  float max_b = max_a;
  float min_c = min_a;
  float max_c = max_a;
  float min_d = min_a;
  float max_d = max_a;
  // Unrolled loop: one element per accumulator pair, no cross dependencies.
  while (batch >= 4 * sizeof(float)) {
    const float x0 = input[0];
    const float x1 = input[1];
    const float x2 = input[2];
    const float x3 = input[3];
    input += 4;
    min_a = math_min_f32(min_a, x0);
    max_a = math_max_f32(max_a, x0);
    min_b = math_min_f32(min_b, x1);
    max_b = math_max_f32(max_b, x1);
    min_c = math_min_f32(min_c, x2);
    max_c = math_max_f32(max_c, x2);
    min_d = math_min_f32(min_d, x3);
    max_d = math_max_f32(max_d, x3);
    batch -= 4 * sizeof(float);
  }
  // Tree-reduce the four accumulator pairs: (A,B), (C,D), then (A,C).
  min_a = math_min_f32(min_a, min_b);
  max_a = math_max_f32(max_a, max_b);
  min_c = math_min_f32(min_c, min_d);
  max_c = math_max_f32(max_c, max_d);
  min_a = math_min_f32(min_a, min_c);
  max_a = math_max_f32(max_a, max_c);
  // Up to three elements can remain.
  if XNN_UNLIKELY(batch != 0) {
    while (batch != 0) {
      const float x = *input++;
      min_a = math_min_f32(min_a, x);
      max_a = math_max_f32(max_a, x);
      batch -= sizeof(float);
    }
  }
  output[0] = min_a;
  output[1] = max_a;
}
| 1,841 | 25.314286 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-sse-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with SSE, 12 floats per
// main-loop iteration with 3 independent accumulator pairs.
// batch is in BYTES; output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__sse_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed accumulators with the first element broadcast to all 4 lanes.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax0 = vmin0;
  __m128 vmin1 = vmin0;
  __m128 vmax1 = vmax0;
  __m128 vmin2 = vmin0;
  __m128 vmax2 = vmax0;
  // Main loop: three independent 4-lane vectors per iteration.
  for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    input += 12;
    vmin0 = _mm_min_ps(vmin0, vt0);
    vmax0 = _mm_max_ps(vmax0, vt0);
    vmin1 = _mm_min_ps(vmin1, vt1);
    vmax1 = _mm_max_ps(vmax1, vt1);
    vmin2 = _mm_min_ps(vmin2, vt2);
    vmax2 = _mm_max_ps(vmax2, vt2);
  }
  // Fold the three accumulator pairs into accumulator 0.
  vmin0 = _mm_min_ps(vmin0, vmin1);
  vmax0 = _mm_max_ps(vmax0, vmax1);
  vmin0 = _mm_min_ps(vmin0, vmin2);
  vmax0 = _mm_max_ps(vmax0, vmax2);
  // Drain remaining whole 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail: fold each remaining element into lane 0 only.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction: high pair into low pair, then lane 1 into lane 0.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
  _mm_store_ss(output + 1 , vmax0);
}
| 2,247 | 28.578947 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-sse-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with SSE, 16 floats per
// main-loop iteration folded alternately into 2 accumulator pairs.
// batch is in BYTES; output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__sse_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed accumulators with the first element broadcast to all 4 lanes.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax0 = vmin0;
  __m128 vmin1 = vmin0;
  __m128 vmax1 = vmax0;
  // Main loop: four vectors per iteration, alternating between the two
  // accumulator pairs (vt0/vt2 -> pair 0, vt1/vt3 -> pair 1).
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    const __m128 vt3 = _mm_loadu_ps(input + 12);
    input += 16;
    vmin0 = _mm_min_ps(vmin0, vt0);
    vmax0 = _mm_max_ps(vmax0, vt0);
    vmin1 = _mm_min_ps(vmin1, vt1);
    vmax1 = _mm_max_ps(vmax1, vt1);
    vmin0 = _mm_min_ps(vmin0, vt2);
    vmax0 = _mm_max_ps(vmax0, vt2);
    vmin1 = _mm_min_ps(vmin1, vt3);
    vmax1 = _mm_max_ps(vmax1, vt3);
  }
  // Merge the accumulator pairs, then drain remaining whole vectors.
  vmin0 = _mm_min_ps(vmin0, vmin1);
  vmax0 = _mm_max_ps(vmax0, vmax1);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail: fold each remaining element into lane 0 only.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction: high pair into low pair, then lane 1 into lane 0.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
  _mm_store_ss(output + 1 , vmax0);
}
| 2,248 | 28.986667 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-sse-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with SSE, 16 floats per
// main-loop iteration with 4 independent accumulator pairs.
// batch is in BYTES; output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__sse_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed accumulators with the first element broadcast to all 4 lanes.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax0 = vmin0;
  __m128 vmin1 = vmin0;
  __m128 vmax1 = vmax0;
  __m128 vmin2 = vmin0;
  __m128 vmax2 = vmax0;
  __m128 vmin3 = vmin0;
  __m128 vmax3 = vmax0;
  // Main loop: one vector per accumulator pair, no cross dependencies.
  for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    const __m128 vt2 = _mm_loadu_ps(input + 8);
    const __m128 vt3 = _mm_loadu_ps(input + 12);
    input += 16;
    vmin0 = _mm_min_ps(vmin0, vt0);
    vmax0 = _mm_max_ps(vmax0, vt0);
    vmin1 = _mm_min_ps(vmin1, vt1);
    vmax1 = _mm_max_ps(vmax1, vt1);
    vmin2 = _mm_min_ps(vmin2, vt2);
    vmax2 = _mm_max_ps(vmax2, vt2);
    vmin3 = _mm_min_ps(vmin3, vt3);
    vmax3 = _mm_max_ps(vmax3, vt3);
  }
  // Tree-reduce the four accumulator pairs: (0,1) and (2,3), then (0,2).
  vmin0 = _mm_min_ps(vmin0, vmin1);
  vmax0 = _mm_max_ps(vmax0, vmax1);
  vmin2 = _mm_min_ps(vmin2, vmin3);
  vmax2 = _mm_max_ps(vmax2, vmax3);
  vmin0 = _mm_min_ps(vmin0, vmin2);
  vmax0 = _mm_max_ps(vmax0, vmax2);
  // Drain remaining whole 4-element vectors.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail: fold each remaining element into lane 0 only.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction: high pair into low pair, then lane 1 into lane 0.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
  _mm_store_ss(output + 1 , vmax0);
}
| 2,488 | 28.987952 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-sse-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with SSE, 4 floats per
// iteration with a single accumulator pair. batch is in BYTES;
// output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__sse_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed accumulators with the first element broadcast to all 4 lanes.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax0 = vmin0;
  // Main loop: one 4-lane vector per iteration.
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail: fold each remaining element into lane 0 only.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction: high pair into low pair, then lane 1 into lane 0.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
  _mm_store_ss(output + 1 , vmax0);
}
| 1,554 | 27.272727 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-sse-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/sse.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Computes min and max of a batch of floats with SSE, 8 floats per
// main-loop iteration with 2 independent accumulator pairs.
// batch is in BYTES; output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__sse_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed accumulators with the first element broadcast to all 4 lanes.
  __m128 vmin0 = _mm_load_ss(input);
  vmin0 = _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(0, 0, 0, 0));
  __m128 vmax0 = vmin0;
  __m128 vmin1 = vmin0;
  __m128 vmax1 = vmax0;
  // Main loop: two independent 4-lane vectors per iteration.
  for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
    const __m128 vt0 = _mm_loadu_ps(input);
    const __m128 vt1 = _mm_loadu_ps(input + 4);
    input += 8;
    vmin0 = _mm_min_ps(vmin0, vt0);
    vmax0 = _mm_max_ps(vmax0, vt0);
    vmin1 = _mm_min_ps(vmin1, vt1);
    vmax1 = _mm_max_ps(vmax1, vt1);
  }
  // Merge the accumulator pairs, then drain remaining whole vectors.
  vmin0 = _mm_min_ps(vmin0, vmin1);
  vmax0 = _mm_max_ps(vmax0, vmax1);
  for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
    const __m128 vt = _mm_loadu_ps(input);
    input += 4;
    vmin0 = _mm_min_ps(vmin0, vt);
    vmax0 = _mm_max_ps(vmax0, vt);
  }
  // Scalar tail: fold each remaining element into lane 0 only.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const __m128 vt = _mm_load_ss(input);
      input += 1;
      vmin0 = _mm_min_ss(vmin0, vt);
      vmax0 = _mm_max_ss(vmax0, vt);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  // Horizontal reduction: high pair into low pair, then lane 1 into lane 0.
  vmin0 = _mm_min_ps(vmin0, _mm_movehl_ps(vmin0, vmin0));
  vmin0 = _mm_min_ss(vmin0, _mm_shuffle_ps(vmin0, vmin0, _MM_SHUFFLE(1, 1, 1, 1)));
  vmax0 = _mm_max_ps(vmax0, _mm_movehl_ps(vmax0, vmax0));
  vmax0 = _mm_max_ss(vmax0, _mm_shuffle_ps(vmax0, vmax0, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(output, vmin0);
  _mm_store_ss(output + 1 , vmax0);
}
| 2,003 | 28.043478 | 83 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasm-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Min/max reduction over a batch of floats using the WebAssembly scalar
// min/max builtins, one element at a time. batch is in BYTES and must be
// a nonzero multiple of sizeof(float).
// Writes output[0] = min, output[1] = max. params is unused.
void xnn_f32_rminmax_ukernel__wasm_x1(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed both running extrema with the first element.
  float running_min = input[0];
  float running_max = input[0];
  // batch != 0 is asserted, so the loop always executes at least once.
  while (batch != 0) {
    const float value = *input++;
    running_min = __builtin_wasm_min_f32(running_min, value);
    running_max = __builtin_wasm_max_f32(running_max, value);
    batch -= sizeof(float);
  }
  output[0] = running_min;
  output[1] = running_max;
}
| 893 | 22.526316 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasm-x2-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Min/max reduction over a batch of floats using the WebAssembly scalar
// min/max builtins, unrolled by 2 with two independent accumulator pairs.
// batch is in BYTES and must be a nonzero multiple of sizeof(float).
// Writes output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__wasm_x2_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed both accumulator pairs with the first element.
  float min_a = input[0];
  float max_a = input[0];
  float min_b = min_a;
  float max_b = max_a;
  // Unrolled loop: even elements feed pair A, odd elements feed pair B.
  while (batch >= 2 * sizeof(float)) {
    const float x0 = input[0];
    const float x1 = input[1];
    input += 2;
    min_a = __builtin_wasm_min_f32(min_a, x0);
    max_a = __builtin_wasm_max_f32(max_a, x0);
    min_b = __builtin_wasm_min_f32(min_b, x1);
    max_b = __builtin_wasm_max_f32(max_b, x1);
    batch -= 2 * sizeof(float);
  }
  // Merge the two accumulator pairs.
  min_a = __builtin_wasm_min_f32(min_a, min_b);
  max_a = __builtin_wasm_max_f32(max_a, max_b);
  // At most one element can remain.
  if XNN_UNLIKELY(batch != 0) {
    const float x = *input;
    min_a = __builtin_wasm_min_f32(min_a, x);
    max_a = __builtin_wasm_max_f32(max_a, x);
  }
  output[0] = min_a;
  output[1] = max_a;
}
| 1,360 | 25.173077 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasm-x3-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Min/max reduction over a batch of floats using the WebAssembly scalar
// min/max builtins, unrolled by 3 with three independent accumulator pairs.
// batch is in BYTES and must be a nonzero multiple of sizeof(float).
// Writes output[0] = min, output[1] = max.
void xnn_f32_rminmax_ukernel__wasm_x3_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  // Seed all three accumulator pairs with the first element.
  float min_a = input[0];
  float max_a = input[0];
  float min_b = min_a;
  float max_b = max_a;
  float min_c = min_a;
  float max_c = max_a;
  // Unrolled loop: elements rotate through pairs A, B, C.
  while (batch >= 3 * sizeof(float)) {
    const float x0 = input[0];
    const float x1 = input[1];
    const float x2 = input[2];
    input += 3;
    min_a = __builtin_wasm_min_f32(min_a, x0);
    max_a = __builtin_wasm_max_f32(max_a, x0);
    min_b = __builtin_wasm_min_f32(min_b, x1);
    max_b = __builtin_wasm_max_f32(max_b, x1);
    min_c = __builtin_wasm_min_f32(min_c, x2);
    max_c = __builtin_wasm_max_f32(max_c, x2);
    batch -= 3 * sizeof(float);
  }
  // Merge the three accumulator pairs into pair A.
  min_a = __builtin_wasm_min_f32(min_a, min_b);
  max_a = __builtin_wasm_max_f32(max_a, max_b);
  min_a = __builtin_wasm_min_f32(min_a, min_c);
  max_a = __builtin_wasm_max_f32(max_a, max_c);
  // Up to two elements can remain.
  if XNN_UNLIKELY(batch != 0) {
    while (batch != 0) {
      const float x = *input++;
      min_a = __builtin_wasm_min_f32(min_a, x);
      max_a = __builtin_wasm_max_f32(max_a, x);
      batch -= sizeof(float);
    }
  }
  output[0] = min_a;
  output[1] = max_a;
}
| 1,703 | 26.483871 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasm-x4-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Scalar WAsm min/max reduction, unrolled by 4 over 2 accumulator pairs
// (elements 0,2 feed pair 0; elements 1,3 feed pair 1).
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasm_x4_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // All accumulators start at the first element.
  float vlo0 = *input;
  float vhi0 = *input;
  float vlo1 = vlo0;
  float vhi1 = vhi0;
  // Main loop: 4 elements per iteration, alternating between the two pairs.
  while (batch >= 4 * sizeof(float)) {
    const float vx0 = input[0];
    const float vx1 = input[1];
    const float vx2 = input[2];
    const float vx3 = input[3];
    input += 4;
    vlo0 = __builtin_wasm_min_f32(vlo0, vx0);
    vhi0 = __builtin_wasm_max_f32(vhi0, vx0);
    vlo1 = __builtin_wasm_min_f32(vlo1, vx1);
    vhi1 = __builtin_wasm_max_f32(vhi1, vx1);
    vlo0 = __builtin_wasm_min_f32(vlo0, vx2);
    vhi0 = __builtin_wasm_max_f32(vhi0, vx2);
    vlo1 = __builtin_wasm_min_f32(vlo1, vx3);
    vhi1 = __builtin_wasm_max_f32(vhi1, vx3);
    batch -= 4 * sizeof(float);
  }
  // Fold the second accumulator pair into the first.
  vlo0 = __builtin_wasm_min_f32(vlo0, vlo1);
  vhi0 = __builtin_wasm_max_f32(vhi0, vhi1);
  // Remainder: up to 3 leftover scalars.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vx = *input++;
      vlo0 = __builtin_wasm_min_f32(vlo0, vx);
      vhi0 = __builtin_wasm_max_f32(vhi0, vx);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  output[0] = vlo0;
  output[1] = vhi0;
}
| 1,689 | 26.704918 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasm-x4-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// Scalar WAsm min/max reduction, unrolled by 4 with 4 independent accumulator
// pairs merged tree-style at the end.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasm_x4_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Seed all four accumulator pairs with the first element.
  float vlo0 = *input;
  float vhi0 = *input;
  float vlo1 = vlo0;
  float vhi1 = vhi0;
  float vlo2 = vlo0;
  float vhi2 = vhi0;
  float vlo3 = vlo0;
  float vhi3 = vhi0;
  // Main loop: 4 elements per iteration, one per accumulator pair.
  while (batch >= 4 * sizeof(float)) {
    const float vx0 = input[0];
    const float vx1 = input[1];
    const float vx2 = input[2];
    const float vx3 = input[3];
    input += 4;
    vlo0 = __builtin_wasm_min_f32(vlo0, vx0);
    vhi0 = __builtin_wasm_max_f32(vhi0, vx0);
    vlo1 = __builtin_wasm_min_f32(vlo1, vx1);
    vhi1 = __builtin_wasm_max_f32(vhi1, vx1);
    vlo2 = __builtin_wasm_min_f32(vlo2, vx2);
    vhi2 = __builtin_wasm_max_f32(vhi2, vx2);
    vlo3 = __builtin_wasm_min_f32(vlo3, vx3);
    vhi3 = __builtin_wasm_max_f32(vhi3, vx3);
    batch -= 4 * sizeof(float);
  }
  // Tree reduction: (0,1) and (2,3), then the two halves.
  vlo0 = __builtin_wasm_min_f32(vlo0, vlo1);
  vhi0 = __builtin_wasm_max_f32(vhi0, vhi1);
  vlo2 = __builtin_wasm_min_f32(vlo2, vlo3);
  vhi2 = __builtin_wasm_max_f32(vhi2, vhi3);
  vlo0 = __builtin_wasm_min_f32(vlo0, vlo2);
  vhi0 = __builtin_wasm_max_f32(vhi0, vhi2);
  // Remainder: up to 3 leftover scalars.
  if XNN_UNLIKELY(batch != 0) {
    do {
      const float vx = *input++;
      vlo0 = __builtin_wasm_min_f32(vlo0, vx);
      vhi0 = __builtin_wasm_max_f32(vhi0, vx);
      batch -= sizeof(float);
    } while (batch != 0);
  }
  output[0] = vlo0;
  output[1] = vhi0;
}
| 1,973 | 27.608696 | 72 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-minmax-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction: 12 floats per main-loop iteration spread across
// 3 vector accumulator pairs, then a 4-wide vector tail and a lane-wise fold.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_minmax_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  v128_t vlo1 = vlo0;
  v128_t vhi1 = vhi0;
  v128_t vlo2 = vlo0;
  v128_t vhi2 = vhi0;
  // Main loop: three 4-float vectors per iteration, one per accumulator pair.
  while (batch >= 12 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    input += 12;
    vlo0 = wasm_f32x4_min(vlo0, vx0);
    vhi0 = wasm_f32x4_max(vhi0, vx0);
    vlo1 = wasm_f32x4_min(vlo1, vx1);
    vhi1 = wasm_f32x4_max(vhi1, vx1);
    vlo2 = wasm_f32x4_min(vlo2, vx2);
    vhi2 = wasm_f32x4_max(vhi2, vx2);
    batch -= 12 * sizeof(float);
  }
  // Merge the partial accumulators into pair 0.
  vlo0 = wasm_f32x4_min(vlo0, vlo1);
  vhi0 = wasm_f32x4_max(vhi0, vhi1);
  vlo0 = wasm_f32x4_min(vlo0, vlo2);
  vhi0 = wasm_f32x4_max(vhi0, vhi2);
  // Vector tail: remaining whole 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 2,459 | 30.948052 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-minmax-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction: 16 floats per main-loop iteration over
// 2 vector accumulator pairs (vectors 0,2 feed pair 0; vectors 1,3 feed pair 1).
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_minmax_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  v128_t vlo1 = vlo0;
  v128_t vhi1 = vhi0;
  // Main loop: four 4-float vectors per iteration, alternating pairs.
  while (batch >= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    input += 16;
    vlo0 = wasm_f32x4_min(vlo0, vx0);
    vhi0 = wasm_f32x4_max(vhi0, vx0);
    vlo1 = wasm_f32x4_min(vlo1, vx1);
    vhi1 = wasm_f32x4_max(vhi1, vx1);
    vlo0 = wasm_f32x4_min(vlo0, vx2);
    vhi0 = wasm_f32x4_max(vhi0, vx2);
    vlo1 = wasm_f32x4_min(vlo1, vx3);
    vhi1 = wasm_f32x4_max(vhi1, vx3);
    batch -= 16 * sizeof(float);
  }
  // Fold the second accumulator pair into the first.
  vlo0 = wasm_f32x4_min(vlo0, vlo1);
  vhi0 = wasm_f32x4_max(vhi0, vhi1);
  // Vector tail: remaining whole 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 2,462 | 31.407895 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-minmax-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction: 16 floats per main-loop iteration with
// 4 vector accumulator pairs merged tree-style, then vector and lane tails.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_minmax_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  v128_t vlo1 = vlo0;
  v128_t vhi1 = vhi0;
  v128_t vlo2 = vlo0;
  v128_t vhi2 = vhi0;
  v128_t vlo3 = vlo0;
  v128_t vhi3 = vhi0;
  // Main loop: four 4-float vectors per iteration, one per accumulator pair.
  while (batch >= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    input += 16;
    vlo0 = wasm_f32x4_min(vlo0, vx0);
    vhi0 = wasm_f32x4_max(vhi0, vx0);
    vlo1 = wasm_f32x4_min(vlo1, vx1);
    vhi1 = wasm_f32x4_max(vhi1, vx1);
    vlo2 = wasm_f32x4_min(vlo2, vx2);
    vhi2 = wasm_f32x4_max(vhi2, vx2);
    vlo3 = wasm_f32x4_min(vlo3, vx3);
    vhi3 = wasm_f32x4_max(vhi3, vx3);
    batch -= 16 * sizeof(float);
  }
  // Tree reduction: (0,1) and (2,3), then the two halves.
  vlo0 = wasm_f32x4_min(vlo0, vlo1);
  vhi0 = wasm_f32x4_max(vhi0, vhi1);
  vlo2 = wasm_f32x4_min(vlo2, vlo3);
  vhi2 = wasm_f32x4_max(vhi2, vhi3);
  vlo0 = wasm_f32x4_min(vlo0, vlo2);
  vhi0 = wasm_f32x4_max(vhi0, vhi2);
  // Vector tail: remaining whole 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 2,718 | 31.369048 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-minmax-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction: one 4-float vector per iteration, single
// accumulator pair, then a lane-wise fold for the scalar remainder.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_minmax_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  // Main loop: one whole 4-float vector at a time.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 1,720 | 29.732143 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-minmax-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction: 8 floats per main-loop iteration over
// 2 vector accumulator pairs, then vector and lane tails.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_minmax_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  v128_t vlo1 = vlo0;
  v128_t vhi1 = vhi0;
  // Main loop: two 4-float vectors per iteration, one per accumulator pair.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    input += 8;
    vlo0 = wasm_f32x4_min(vlo0, vx0);
    vhi0 = wasm_f32x4_max(vhi0, vx0);
    vlo1 = wasm_f32x4_min(vlo1, vx1);
    vhi1 = wasm_f32x4_max(vhi1, vx1);
    batch -= 8 * sizeof(float);
  }
  // Fold the second accumulator pair into the first.
  vlo0 = wasm_f32x4_min(vlo0, vlo1);
  vhi0 = wasm_f32x4_max(vhi0, vhi1);
  // Vector tail: remaining whole 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_min(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_max(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_min(vlo0, vx);
    vhi0 = wasm_f32x4_max(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 2,197 | 30.4 | 78 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-pminmax-x12-acc3.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction using the pmin/pmax instructions: 12 floats per
// main-loop iteration across 3 vector accumulator pairs, then tail handling.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x12_acc3(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  v128_t vlo1 = vlo0;
  v128_t vhi1 = vhi0;
  v128_t vlo2 = vlo0;
  v128_t vhi2 = vhi0;
  // Main loop: three 4-float vectors per iteration, one per accumulator pair.
  while (batch >= 12 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    input += 12;
    vlo0 = wasm_f32x4_pmin(vlo0, vx0);
    vhi0 = wasm_f32x4_pmax(vhi0, vx0);
    vlo1 = wasm_f32x4_pmin(vlo1, vx1);
    vhi1 = wasm_f32x4_pmax(vhi1, vx1);
    vlo2 = wasm_f32x4_pmin(vlo2, vx2);
    vhi2 = wasm_f32x4_pmax(vhi2, vx2);
    batch -= 12 * sizeof(float);
  }
  // Merge the partial accumulators into pair 0.
  vlo0 = wasm_f32x4_pmin(vlo0, vlo1);
  vhi0 = wasm_f32x4_pmax(vhi0, vhi1);
  vlo0 = wasm_f32x4_pmin(vlo0, vlo2);
  vhi0 = wasm_f32x4_pmax(vhi0, vhi2);
  // Vector tail: remaining whole 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 2,480 | 31.220779 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-pminmax-x16-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction using pmin/pmax: 16 floats per main-loop
// iteration over 2 vector accumulator pairs (vectors 0,2 -> pair 0; 1,3 -> pair 1).
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x16_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  v128_t vlo1 = vlo0;
  v128_t vhi1 = vhi0;
  // Main loop: four 4-float vectors per iteration, alternating pairs.
  while (batch >= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    input += 16;
    vlo0 = wasm_f32x4_pmin(vlo0, vx0);
    vhi0 = wasm_f32x4_pmax(vhi0, vx0);
    vlo1 = wasm_f32x4_pmin(vlo1, vx1);
    vhi1 = wasm_f32x4_pmax(vhi1, vx1);
    vlo0 = wasm_f32x4_pmin(vlo0, vx2);
    vhi0 = wasm_f32x4_pmax(vhi0, vx2);
    vlo1 = wasm_f32x4_pmin(vlo1, vx3);
    vhi1 = wasm_f32x4_pmax(vhi1, vx3);
    batch -= 16 * sizeof(float);
  }
  // Fold the second accumulator pair into the first.
  vlo0 = wasm_f32x4_pmin(vlo0, vlo1);
  vhi0 = wasm_f32x4_pmax(vhi0, vhi1);
  // Vector tail: remaining whole 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 2,483 | 31.684211 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-pminmax-x16-acc4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction using pmin/pmax: 16 floats per main-loop
// iteration with 4 vector accumulator pairs merged tree-style, then tails.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x16_acc4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  v128_t vlo1 = vlo0;
  v128_t vhi1 = vhi0;
  v128_t vlo2 = vlo0;
  v128_t vhi2 = vhi0;
  v128_t vlo3 = vlo0;
  v128_t vhi3 = vhi0;
  // Main loop: four 4-float vectors per iteration, one per accumulator pair.
  while (batch >= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    input += 16;
    vlo0 = wasm_f32x4_pmin(vlo0, vx0);
    vhi0 = wasm_f32x4_pmax(vhi0, vx0);
    vlo1 = wasm_f32x4_pmin(vlo1, vx1);
    vhi1 = wasm_f32x4_pmax(vhi1, vx1);
    vlo2 = wasm_f32x4_pmin(vlo2, vx2);
    vhi2 = wasm_f32x4_pmax(vhi2, vx2);
    vlo3 = wasm_f32x4_pmin(vlo3, vx3);
    vhi3 = wasm_f32x4_pmax(vhi3, vx3);
    batch -= 16 * sizeof(float);
  }
  // Tree reduction: (0,1) and (2,3), then the two halves.
  vlo0 = wasm_f32x4_pmin(vlo0, vlo1);
  vhi0 = wasm_f32x4_pmax(vhi0, vhi1);
  vlo2 = wasm_f32x4_pmin(vlo2, vlo3);
  vhi2 = wasm_f32x4_pmax(vhi2, vhi3);
  vlo0 = wasm_f32x4_pmin(vlo0, vlo2);
  vhi0 = wasm_f32x4_pmax(vhi0, vhi2);
  // Vector tail: remaining whole 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 2,743 | 31.666667 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-pminmax-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction using pmin/pmax: one 4-float vector per
// iteration, single accumulator pair, then a lane-wise fold for the remainder.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x4(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  // Main loop: one whole 4-float vector at a time.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 1,731 | 29.928571 | 79 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-rminmax/gen/f32-rminmax-wasmsimd-pminmax-x8-acc2.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-rminmax/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/reduce.h>
// WAsm SIMD min/max reduction using pmin/pmax: 8 floats per main-loop
// iteration over 2 vector accumulator pairs, then vector and lane tails.
// batch is in bytes; output[0] = minimum, output[1] = maximum.
void xnn_f32_rminmax_ukernel__wasmsimd_pminmax_x8_acc2(
    size_t batch,
    const float* input,
    float* output,
    const union xnn_f32_default_params* params)
{
  assert(batch != 0);
  assert(batch % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);
  // Broadcast the first element into every accumulator lane.
  v128_t vlo0 = wasm_v128_load32_splat(input);
  v128_t vhi0 = vlo0;
  v128_t vlo1 = vlo0;
  v128_t vhi1 = vhi0;
  // Main loop: two 4-float vectors per iteration, one per accumulator pair.
  while (batch >= 8 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    input += 8;
    vlo0 = wasm_f32x4_pmin(vlo0, vx0);
    vhi0 = wasm_f32x4_pmax(vhi0, vx0);
    vlo1 = wasm_f32x4_pmin(vlo1, vx1);
    vhi1 = wasm_f32x4_pmax(vhi1, vx1);
    batch -= 8 * sizeof(float);
  }
  // Fold the second accumulator pair into the first.
  vlo0 = wasm_f32x4_pmin(vlo0, vlo1);
  vhi0 = wasm_f32x4_pmax(vhi0, vhi1);
  // Vector tail: remaining whole 4-float vectors.
  while (batch >= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
    batch -= 4 * sizeof(float);
  }
  // Fold the upper 64 bits onto the lower; only lanes 0-1 matter below.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v64x2_shuffle(vlo0, vlo0, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v64x2_shuffle(vhi0, vhi0, 1, 1));
  if XNN_UNLIKELY(batch & (2 * sizeof(float))) {
    // Two leftover floats go into lanes 0-1; zeroed upper lanes are discarded.
    const v128_t vx = wasm_v128_load64_zero(input);
    input += 2;
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  // Fold lane 1 onto lane 0.
  vlo0 = wasm_f32x4_pmin(vlo0, wasm_v32x4_shuffle(vlo0, vlo0, 1, 1, 1, 1));
  vhi0 = wasm_f32x4_pmax(vhi0, wasm_v32x4_shuffle(vhi0, vhi0, 1, 1, 1, 1));
  if XNN_UNLIKELY(batch & (1 * sizeof(float))) {
    // One leftover float in lane 0.
    const v128_t vx = wasm_v128_load32_zero(input);
    vlo0 = wasm_f32x4_pmin(vlo0, vx);
    vhi0 = wasm_f32x4_pmax(vhi0, vx);
  }
  wasm_v128_store32_lane(output, vlo0, 0);
  wasm_v128_store32_lane(output + 1, vhi0, 0);
}
| 2,214 | 30.642857 | 79 |
c
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.