repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string, 2 classes)
---|---|---|---|---|---|---
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-lut16-p3-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_lut16_p3_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
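// Outline of the math below (constants come from params->wasmsimd_rr2_lut16_p3):
//   z  = max(sat_cutoff, prescale * x)  -- saturate large negative inputs before exponentiation;
//   n  = z * log2e + magic_bias         -- magic-bias trick: the low mantissa bits of n hold
//                                          round(z * log2e * 16) as a fixed-point integer;
//   the low bits of n (index_mask) select an entry of xnn_table_exp2minus_k_over_16, and the
//   remaining integer bits, shifted left by 19 into the exponent field and added to that
//   entry's bit pattern, yield s ~= 2**(n - magic_bias);
//   t  = z - n * ln2, computed with the split constant minus_ln2_hi/minus_ln2_lo;
//   p  = (c3 * t + c2) * t, then p = t*s + p*(t*s), so p ~= s * (exp(t) - 1);
//   negative lanes produce alpha * (p + (s - 1)) ~= alpha * (exp(z) - 1), non-negative lanes
//   produce beta * x, and the sign mask of x (arithmetic shift by 31) picks between the two
//   via relaxed laneselect.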
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxG));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxJ), vlGHIJ, 3);
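// vl0123..vlGHIJ now hold the gathered table entries for all 20 elements; WAsm SIMD has no
// gather instruction, so the loads above fill one 32-bit lane at a time.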
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
v128_t vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtGHIJ));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t veGHIJ = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpGHIJ, vsGHIJ));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(veGHIJ, vxGHIJ, vsignmGHIJ);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
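// Remainder of 1-3 floats: a full 16-byte vector is still loaded (the kernel carries the
// XNN_OOB_READS annotation, which permits reading past the end of the input), and only the
// valid lanes are written back through the 64-bit and 32-bit lane stores below.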
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 15,764 | 53.175258 | 124 | c |
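For orientation, here is a scalar sketch of the per-element transformation that every kernel in this table vectorizes; the helper name is invented for illustration, and the parameter names simply mirror the fields the kernels load from params.

#include <math.h>

// Hypothetical scalar reference (not part of XNNPACK): ELU with input prescaling and
// output scaling, matching the vector kernels' sign-based branch selection.
static float f32_velu_scalar_ref(float x, float prescale, float alpha, float beta, float sat_cutoff) {
  if (signbit(x)) {
    // Negative inputs: saturate the prescaled value, then alpha * (exp(z) - 1).
    const float z = fmaxf(sat_cutoff, x * prescale);
    return alpha * expm1f(z);
  }
  // Non-negative inputs pass through with a beta scale.
  return beta * x;
}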
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-lut16-p3-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_lut16_p3_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
const v128_t vzKLMN = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxKLMN, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vlog2e));
v128_t vnKLMN = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzKLMN, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
const v128_t venKLMN = wasm_i32x4_shl(vnKLMN, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxG));
const uint32_t vidxK = wasm_u32x4_extract_lane(vidxKLMN, 0);
v128_t vlKLMN = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxK));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidxL = wasm_u32x4_extract_lane(vidxKLMN, 1);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxL), vlKLMN, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidxM = wasm_u32x4_extract_lane(vidxKLMN, 2);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxM), vlKLMN, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxJ), vlGHIJ, 3);
const uint32_t vidxN = wasm_u32x4_extract_lane(vidxKLMN, 3);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxN), vlKLMN, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vsKLMN = wasm_i32x4_add(vlKLMN, venKLMN);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi));
v128_t vtKLMN = wasm_f32x4_add(vzKLMN, wasm_f32x4_mul(vnKLMN, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
v128_t vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtGHIJ));
v128_t vpKLMN = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtKLMN));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
vpKLMN = wasm_f32x4_add(vtKLMN, wasm_f32x4_mul(vpKLMN, vtKLMN));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t veGHIJ = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpGHIJ, vsGHIJ));
const v128_t veKLMN = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpKLMN, vsKLMN));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(veGHIJ, vxGHIJ, vsignmGHIJ);
const v128_t vyKLMN = __builtin_wasm_relaxed_laneselect_i32x4(veKLMN, vxKLMN, vsignmKLMN);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 17,750 | 54.820755 | 124 | c |
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-lut16-p3-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_lut16_p3_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 5,723 | 41.088235 | 116 | c |
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-lut16-p3-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_lut16_p3_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,803 | 45.685714 | 124 | c |
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-p6-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_p6_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
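// Same ELU skeleton as the lut16-p3 kernels above, but this rr2-p6 variant skips the lookup
// table: n is shifted left by 23 straight into the exponent field to form s ~= 2**round(z * log2e),
// and exp(t) - 1 is approximated by a degree-6 polynomial (c6..c2) evaluated with relaxed
// fused multiply-adds (__builtin_wasm_relaxed_madd_f32x4).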
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vlog2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vlog2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vlog2e, vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vz89AB);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt0123, vc5);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt4567, vc5);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc6, vt89AB, vc5);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc4);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc4);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vt4567);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vt89AB);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 8,431 | 42.689119 | 107 | c |
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-p6-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
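    // z := max(prescale * x, sat_cutoff). The clamp bounds how negative z can get so the
    // 2^n reconstruction below cannot underflow; results for non-negative x are discarded
    // by the final sign-based blend anyway.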
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
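    // n := round(z * log2(e)) via the magic-bias trick: adding the large magic_bias
    // constant pushes out the fractional bits, leaving the rounded integer in the low
    // mantissa bits of vn.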
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vlog2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vlog2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vlog2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vlog2e, vmagic_bias);
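    // s := 2^n, built by shifting those low bits of vn straight into the float exponent field.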
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
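    // t := z - n * ln(2), with ln(2) split into hi and lo parts (Cody-Waite style) so the
    // reduction stays accurate.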
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vzCDEF);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
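    // Degree-6 polynomial approximation of exp(t) on the reduced range, evaluated with
    // Horner's scheme: p = ((((c6*t + c5)*t + c4)*t + c3)*t + c2).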
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt0123, vc5);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt4567, vc5);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc6, vt89AB, vc5);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc6, vtCDEF, vc5);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc4);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc4);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc4);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
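    // Reconstruct expm1(z): with q = t*s and s' = s - 1, p := p*q + q gives
    // s * (t + c2*t^2 + ... + c6*t^6), so p + s' ~= s*exp(t) - 1 = expm1(z).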
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vt4567);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vt89AB);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vtCDEF);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
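    // ve now holds alpha * expm1(z). An arithmetic right shift of the raw float bits yields
    // an all-ones mask for negative x, so the relaxed laneselect below picks alpha*expm1(z)
    // for negative lanes and beta*x for the rest.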
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,740 | 44.732394 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-p6-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_p6_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
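  // Same rr2-p6 ELU computation as the x16 variant above, unrolled to 20 elements
  // (five 4-lane vectors) per main-loop iteration.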
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vlog2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vlog2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vlog2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vlog2e, vmagic_bias);
v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vzGHIJ, vlog2e, vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vzCDEF);
v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_hi, vzGHIJ);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_lo, vtGHIJ);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt0123, vc5);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt4567, vc5);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc6, vt89AB, vc5);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc6, vtCDEF, vc5);
v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vc6, vtGHIJ, vc5);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc4);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc4);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc4);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc4);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vt4567);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vt89AB);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vtCDEF);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vtGHIJ);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(veGHIJ, vxGHIJ, vsignmGHIJ);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 11,049 | 46.424893 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-p6-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
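  // As above, the same rr2-p6 computation, here unrolled to 24 elements (six vectors)
  // per main-loop iteration.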
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
const v128_t vzKLMN = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxKLMN, vprescale));
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vlog2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vlog2e, vmagic_bias);
v128_t vn89AB = __builtin_wasm_relaxed_madd_f32x4(vz89AB, vlog2e, vmagic_bias);
v128_t vnCDEF = __builtin_wasm_relaxed_madd_f32x4(vzCDEF, vlog2e, vmagic_bias);
v128_t vnGHIJ = __builtin_wasm_relaxed_madd_f32x4(vzGHIJ, vlog2e, vmagic_bias);
v128_t vnKLMN = __builtin_wasm_relaxed_madd_f32x4(vzKLMN, vlog2e, vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vz4567);
v128_t vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_hi, vz89AB);
v128_t vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_hi, vzCDEF);
v128_t vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_hi, vzGHIJ);
v128_t vtKLMN = __builtin_wasm_relaxed_madd_f32x4(vnKLMN, vminus_ln2_hi, vzKLMN);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
vt89AB = __builtin_wasm_relaxed_madd_f32x4(vn89AB, vminus_ln2_lo, vt89AB);
vtCDEF = __builtin_wasm_relaxed_madd_f32x4(vnCDEF, vminus_ln2_lo, vtCDEF);
vtGHIJ = __builtin_wasm_relaxed_madd_f32x4(vnGHIJ, vminus_ln2_lo, vtGHIJ);
vtKLMN = __builtin_wasm_relaxed_madd_f32x4(vnKLMN, vminus_ln2_lo, vtKLMN);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt0123, vc5);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt4567, vc5);
v128_t vp89AB = __builtin_wasm_relaxed_madd_f32x4(vc6, vt89AB, vc5);
v128_t vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vc6, vtCDEF, vc5);
v128_t vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vc6, vtGHIJ, vc5);
v128_t vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vc6, vtKLMN, vc5);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc4);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc4);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc4);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc4);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc4);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vpKLMN, vtKLMN, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc3);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc3);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc3);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vpKLMN, vtKLMN, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vc2);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vc2);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vc2);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vpKLMN, vtKLMN, vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vt4567);
vp89AB = __builtin_wasm_relaxed_madd_f32x4(vp89AB, vt89AB, vt89AB);
vpCDEF = __builtin_wasm_relaxed_madd_f32x4(vpCDEF, vtCDEF, vtCDEF);
vpGHIJ = __builtin_wasm_relaxed_madd_f32x4(vpGHIJ, vtGHIJ, vtGHIJ);
vpKLMN = __builtin_wasm_relaxed_madd_f32x4(vpKLMN, vtKLMN, vtKLMN);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
const v128_t veKLMN = wasm_f32x4_mul(wasm_f32x4_add(vpKLMN, vsKLMN), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(veGHIJ, vxGHIJ, vsignmGHIJ);
const v128_t vyKLMN = __builtin_wasm_relaxed_laneselect_i32x4(veKLMN, vxKLMN, vsignmKLMN);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 12,358 | 47.849802 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-p6-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
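  // Rough scalar model of one lane, for orientation only (not part of the generated code):
  //   z = max(prescale * x, sat_cutoff);
  //   n = round(z * log2(e));  s = 2^n;  t = z - n * ln(2);
  //   q = t + c2*t^2 + c3*t^3 + c4*t^4 + c5*t^5 + c6*t^6;
  //   e = alpha * (s * q + (s - 1));        // ~= alpha * expm1(z)
  //   y = (x < 0) ? e : beta * x;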
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 4,392 | 38.223214 | 99 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-fma-rr2-p6-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_fma_rr2_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
v128_t vn0123 = __builtin_wasm_relaxed_madd_f32x4(vz0123, vlog2e, vmagic_bias);
v128_t vn4567 = __builtin_wasm_relaxed_madd_f32x4(vz4567, vlog2e, vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_hi, vz0123);
v128_t vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_hi, vz4567);
vt0123 = __builtin_wasm_relaxed_madd_f32x4(vn0123, vminus_ln2_lo, vt0123);
vt4567 = __builtin_wasm_relaxed_madd_f32x4(vn4567, vminus_ln2_lo, vt4567);
v128_t vp0123 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt0123, vc5);
v128_t vp4567 = __builtin_wasm_relaxed_madd_f32x4(vc6, vt4567, vc5);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc4);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc4);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc3);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc3);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vc2);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vp0123 = __builtin_wasm_relaxed_madd_f32x4(vp0123, vt0123, vt0123);
vp4567 = __builtin_wasm_relaxed_madd_f32x4(vp4567, vt4567, vt4567);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = __builtin_wasm_relaxed_madd_f32x4(vz, vlog2e, vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_hi, vz);
vt = __builtin_wasm_relaxed_madd_f32x4(vn, vminus_ln2_lo, vt);
v128_t vp = __builtin_wasm_relaxed_madd_f32x4(vc6, vt, vc5);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc4);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc3);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = __builtin_wasm_relaxed_madd_f32x4(vp, vt, vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 7,119 | 40.156069 | 107 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-lut16-p3-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_lut16_p3_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
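    // In this lut16-p3 variant, n is rounded to a multiple of 1/16 (plain mul/add here,
    // no relaxed FMA). The low 4 bits of vn select an entry of the 16-element 2^(-k/16)
    // table (shifted left by 2 to form a byte offset), and the remaining bits are shifted
    // by 19 so they land in the float exponent field.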
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
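    // Reassemble s = 2^n: the gathered table value supplies the fractional power of two,
    // and adding ven into its exponent field scales it by the power of two carried in the
    // upper bits of vn.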
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
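    // Because the table absorbs most of the exponent, the residual t is small and a short
    // degree-3 polynomial (coefficients c3 and c2 plus the implicit leading terms) suffices.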
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 11,788 | 48.742616 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-lut16-p3-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_lut16_p3_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
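    // Remove the magic bias from n and form the scale s by adding the shifted exponent
    // bits (ven*) to the bit patterns of the gathered table values (vl*).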
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
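    // Reduced argument: t = z - n * ln2, with ln2 applied as hi and lo parts for accuracy.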
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
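    // Polynomial term of the degree-3 expm1 approximation: p = (c3 * t + c2) * t.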
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
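    // Reassemble the negative branch: e = alpha * (s * expm1(t) + (s - 1)) = alpha * (exp(z) - 1)
    // up to the polynomial approximation error.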
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
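    // Blend per lane on the sign of x: the ELU branch (ve*) for negative lanes, beta*x for the rest.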
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
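  // Remainder loop: handle any remaining full 4-float vectors with the same computation.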
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
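  // Tail: fewer than 4 floats left; compute one more full vector (out-of-bounds reads are
  // permitted by XNN_OOB_READS) and store only the valid 2 and/or 1 lanes.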
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 13,774 | 51.17803 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-lut16-p3-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_lut16_p3_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
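  // Same rr2-lut16-p3 ELU computation as the kernel above, unrolled to 20 floats per main-loop iteration.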
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxG));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxJ), vlGHIJ, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
v128_t vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtGHIJ));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t veGHIJ = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpGHIJ, vsGHIJ));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(veGHIJ, vxGHIJ, vsignmGHIJ);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 15,760 | 53.161512 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-lut16-p3-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_lut16_p3_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
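  // Same rr2-lut16-p3 ELU computation, unrolled to 24 floats per main-loop iteration.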
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
const v128_t vzKLMN = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxKLMN, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vlog2e));
v128_t vnKLMN = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzKLMN, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
const v128_t venKLMN = wasm_i32x4_shl(vnKLMN, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxG));
const uint32_t vidxK = wasm_u32x4_extract_lane(vidxKLMN, 0);
v128_t vlKLMN = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxK));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidxL = wasm_u32x4_extract_lane(vidxKLMN, 1);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxL), vlKLMN, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidxM = wasm_u32x4_extract_lane(vidxKLMN, 2);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxM), vlKLMN, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxJ), vlGHIJ, 3);
const uint32_t vidxN = wasm_u32x4_extract_lane(vidxKLMN, 3);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxN), vlKLMN, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vsKLMN = wasm_i32x4_add(vlKLMN, venKLMN);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi));
v128_t vtKLMN = wasm_f32x4_add(vzKLMN, wasm_f32x4_mul(vnKLMN, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
v128_t vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtGHIJ));
v128_t vpKLMN = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtKLMN));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
vpKLMN = wasm_f32x4_add(vtKLMN, wasm_f32x4_mul(vpKLMN, vtKLMN));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t veGHIJ = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpGHIJ, vsGHIJ));
const v128_t veKLMN = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpKLMN, vsKLMN));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(veGHIJ, vxGHIJ, vsignmGHIJ);
const v128_t vyKLMN = __builtin_wasm_relaxed_laneselect_i32x4(veKLMN, vxKLMN, vsignmKLMN);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 17,746 | 54.808176 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-lut16-p3-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_lut16_p3_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
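  // Smallest variant of the same kernel: a single 4-float vector per main-loop iteration, no extra unrolling.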
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 5,719 | 41.058824 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-lut16-p3-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_lut16_p3_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
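  // Same rr2-lut16-p3 ELU computation, unrolled to 8 floats per main-loop iteration.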
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,799 | 45.666667 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-p6-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_p6_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
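  // rr2-p6 variant: the 16-entry table is replaced by a degree-6 polynomial in the reduced
  // argument, so the scale s = 2**n comes directly from shifting n into the float exponent
  // field; 12 floats per main-loop iteration.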
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
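  // Tail of 1-3 remaining floats: a full 16-byte load is still issued (permitted by
  // XNN_OOB_READS), and the result is written back with partial 64-bit/32-bit lane
  // stores so no memory past `output` is written.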
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 8,307 | 42.046632 | 107 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-p6-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,592 | 44.037559 | 107 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-p6-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_p6_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(veGHIJ, vxGHIJ, vsignmGHIJ);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 10,877 | 45.686695 | 107 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-p6-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
const v128_t vzKLMN = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vxKLMN, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_hi), vzKLMN);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtKLMN), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vtKLMN);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
const v128_t veKLMN = wasm_f32x4_mul(wasm_f32x4_add(vpKLMN, vsKLMN), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = __builtin_wasm_relaxed_laneselect_i32x4(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = __builtin_wasm_relaxed_laneselect_i32x4(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = __builtin_wasm_relaxed_laneselect_i32x4(veGHIJ, vxGHIJ, vsignmGHIJ);
const v128_t vyKLMN = __builtin_wasm_relaxed_laneselect_i32x4(veKLMN, vxKLMN, vsignmKLMN);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 12,162 | 47.075099 | 107 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-p6-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 4,340 | 37.758929 | 99 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmrelaxedsimd-rr2-p6-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmrelaxedsimd_rr2_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vy0123 = __builtin_wasm_relaxed_laneselect_i32x4(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = __builtin_wasm_relaxed_laneselect_i32x4(ve4567, vx4567, vsignm4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = __builtin_wasm_relaxed_max_f32x4(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = __builtin_wasm_relaxed_laneselect_i32x4(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 7,019 | 39.578035 | 107 | c |

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-lut16-p3-x12.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
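  // Descriptive annotation (not part of the generated kernel): same ELU scheme as the
  // rr2-p6 kernels above, but the power-of-two factor of exp(z) is built with a table.
  // The rounded exponent n (in sixteenths, via the magic bias) contributes its low 4 bits
  // as an index into xnn_table_exp2minus_k_over_16, while its higher bits are shifted
  // into the float exponent field (n << 19) and integer-added to the table entry's bit
  // pattern to form s. With the table absorbing the fractional power of two, the
  // polynomial correction only needs degree 3 (c3, c2) instead of degree 6. Unlike the
  // relaxed-SIMD kernels earlier in this file, this variant uses the fully specified
  // wasm_f32x4_max and wasm_v128_bitselect operations.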
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 11,595 | 47.92827 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-lut16-p3-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
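  // The main loop evaluates the generalized ELU y = signbit(x) ? alpha * (exp(z) - 1) : beta * x,
  // with z = max(prescale * x, sat_cutoff). exp(z) is approximated via a 16-entry exp2 table
  // ("lut16") combined with a degree-3 polynomial ("p3") on the reduced argument.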
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
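    // Pre-scale the inputs and clamp at sat_cutoff, below which exp(z) - 1 saturates to -1.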
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
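    // Round z * log2(e) to a multiple of 1/16 by adding the magic bias; the rounded value is
    // exposed in the low bits of the float representation of vn.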
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
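    // The low 4 bits of vn select a table entry (scaled to a byte offset); shifting vn left by 19
    // moves the integer part of n into the floating-point exponent field.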
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
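    // Gather the exp2 table entries lane by lane using the extracted indices.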
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
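    // Remove the magic bias to recover n, and combine each table entry with its exponent bits to
    // reconstruct the scale factor s ~= 2**n.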
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
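    // Compute the reduced argument t = z - n * ln(2) using a two-constant (hi + lo) Cody-Waite reduction.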
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
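    // Degree-3 polynomial: after these steps vp holds s * (t + c2*t^2 + c3*t^3) and vs holds s - 1.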
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
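    // Negative branch: e = alpha * ((s - 1) + p) ~= alpha * (exp(z) - 1). The arithmetic shift of x
    // by 31 yields an all-ones mask for negative x, selecting e there and beta * x elsewhere.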
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
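  // Process any remaining full 4-element vectors with the same single-vector code path.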
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
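  // Handle the last 1-3 elements with partial 64-bit/32-bit lane stores.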
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 13,543 | 50.30303 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-lut16-p3-x20.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxG));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxJ), vlGHIJ, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
v128_t vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtGHIJ));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t veGHIJ = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpGHIJ, vsGHIJ));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 15,491 | 52.237113 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-lut16-p3-x24.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
const v128_t vzKLMN = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxKLMN, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vlog2e));
v128_t vnKLMN = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzKLMN, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
const v128_t venKLMN = wasm_i32x4_shl(vnKLMN, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxG));
const uint32_t vidxK = wasm_u32x4_extract_lane(vidxKLMN, 0);
v128_t vlKLMN = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxK));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidxL = wasm_u32x4_extract_lane(vidxKLMN, 1);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxL), vlKLMN, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidxM = wasm_u32x4_extract_lane(vidxKLMN, 2);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxM), vlKLMN, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxJ), vlGHIJ, 3);
const uint32_t vidxN = wasm_u32x4_extract_lane(vidxKLMN, 3);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxN), vlKLMN, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vsKLMN = wasm_i32x4_add(vlKLMN, venKLMN);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi));
v128_t vtKLMN = wasm_f32x4_add(vzKLMN, wasm_f32x4_mul(vnKLMN, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
v128_t vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtGHIJ));
v128_t vpKLMN = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtKLMN));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
vpKLMN = wasm_f32x4_add(vtKLMN, wasm_f32x4_mul(vpKLMN, vtKLMN));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t veGHIJ = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpGHIJ, vsGHIJ));
const v128_t veKLMN = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpKLMN, vsKLMN));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
const v128_t vyKLMN = wasm_v128_bitselect(veKLMN, vxKLMN, vsignmKLMN);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 17,439 | 53.842767 | 124 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-lut16-p3-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 5,640 | 40.477941 | 116 | c |
| XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-lut16-p3-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_lut16_p3_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-p6-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
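// ELU micro-kernel, "rr2-p6" variant. Per lane it computes
//   y = beta * x                           for x >= 0
//   y = alpha * (exp(prescale * x) - 1)    for x < 0
// The negative branch clamps z = prescale*x from below at sat_cutoff, splits
// z = n*ln(2) + t with the magic-bias rounding trick and a two-step ("rr2")
// reduction via minus_ln2_hi/minus_ln2_lo, rebuilds 2**n by shifting n into
// the float exponent field, and approximates
//   exp(t) ~= 1 + t + t^2*(c2 + c3*t + c4*t^2 + c5*t^3 + c6*t^4)
// on the reduced range. The final blend keys off the sign bit of the original
// x via wasm_v128_bitselect. This variant is unrolled to 12 floats (three
// v128 vectors) per main-loop iteration.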
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
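/*
 * Usage sketch (not part of the generated kernel above): callers normally go
 * through XNNPACK's operator API, which fills in xnn_f32_elu_params before
 * dispatching to a ukernel like this one. Driving it directly might look
 * roughly like the following; the init helper named below and its
 * (params, prescale, alpha, beta) argument order are assumptions based on
 * XNNPACK's usual naming conventions, not something this file declares:
 *
 *   union xnn_f32_elu_params params;
 *   xnn_init_f32_elu_wasmsimd_rr2_p6_params(&params, 1.0f, 1.0f, 1.0f);
 *   // note: the first ukernel argument is the batch size in BYTES
 *   xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12(n * sizeof(float), x, y, &params);
 */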
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-p6-x16.c
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
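// Same rr2-p6 ELU computation as the x12 variant, unrolled to 16 floats
// (four v128 vectors) per main-loop iteration.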
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-p6-x20.c
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
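// rr2-p6 ELU kernel unrolled to 20 floats (five v128 vectors) per main-loop
// iteration; the per-lane math matches the narrower variants.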
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-p6-x24.c
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
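// rr2-p6 ELU kernel unrolled to 24 floats (six v128 vectors) per main-loop
// iteration.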
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
const v128_t vzKLMN = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vxKLMN, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_hi), vzKLMN);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtKLMN), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vtKLMN);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
const v128_t veKLMN = wasm_f32x4_mul(wasm_f32x4_add(vpKLMN, vsKLMN), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
const v128_t vyKLMN = wasm_v128_bitselect(veKLMN, vxKLMN, vsignmKLMN);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-p6-x4.c
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
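// rr2-p6 ELU kernel with no extra unrolling: the main loop handles a single
// v128 (4 floats) per iteration; only the partial-vector tail path follows it.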
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-arm-rr2-p6-x8.c
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
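// rr2-p6 ELU kernel unrolled to 8 floats (two v128 vectors) per main-loop
// iteration.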
void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_max(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-lut16-p3-x12.c
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
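// ELU micro-kernel, "rr2-lut16-p3" variant: the non-negative path is still
// beta*x, but the negative path evaluates exp() with the 16-entry table of
// 2**(-k/16) values declared above. After the magic-bias rounding, the low
// 4 bits of the rounded, scaled exponent select a table entry (vidx is
// shifted left by 2 to form a byte offset into the float table), while the
// remaining bits are shifted by 19 (= 23 - 4) straight into the float
// exponent field (ven); adding the two bit patterns reconstructs the scale s.
// The residual t is then small enough that a degree-3 polynomial (c3, c2,
// with constant and linear terms fixed at 1) suffices. This x86-tuned flavour
// uses wasm_f32x4_pmax for the saturation clamp.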
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
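  // Remainder loop: the same LUT16 + degree-3 polynomial ELU evaluation, one 4-lane group at a time.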
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
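  // Tail of 1-3 elements: compute one more 4-lane group, then store a 64-bit lane and/or a 32-bit lane depending on the remaining byte count.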
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 11,600 | 47.949367 | 124 |
c

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-lut16-p3-x16.c

// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
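  // Main loop: 16 elements per iteration, processed as four independent 4-lane groups to expose instruction-level parallelism.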
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
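    // z := prescale * x, clamped from below at sat_cutoff; the exponential branch computed from z is only selected for lanes where x is negative.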
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
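    // n := z * log2(e) + magic_bias; the magic-bias addition rounds z * log2(e) to the nearest multiple of 1/16 and leaves that fixed-point value in the low mantissa bits of n.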
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
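    // The low 4 bits of each n select one of the 16 table entries (shifted left by 2 to form a byte offset); the higher bits, shifted left by 19, line up with the float exponent field for the reconstruction below.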
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
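    // Gather complete: undo the magic bias and splice each table value together with its exponent bits; the integer addition reconstructs s, i.e. 2 raised to the rounded z*log2(e).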
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
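    // t := z - n * ln(2), with ln(2) split into high and low words (the two-step "rr2" reduction) so the residual stays accurate.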
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
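    // Degree-3 polynomial in the residual: after the steps below p = s*(t + c2*t^2 + c3*t^3), so p + (s - 1) approximates s*exp(t) - 1 = exp(z) - 1.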
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
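    // ELU selection: e = alpha*(exp(z) - 1) for negative lanes, beta*x otherwise; the arithmetic right shift of x by 31 turns each sign bit into a full-lane mask for the bitselect.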
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 13,549 | 50.325758 | 124 |
c

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-lut16-p3-x20.c

// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
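  // Same evaluation as the x16 variant above, unrolled to 20 elements (five 4-lane groups) per main-loop iteration.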
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxG));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxJ), vlGHIJ, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
v128_t vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtGHIJ));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t veGHIJ = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpGHIJ, vsGHIJ));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 15,498 | 52.261168 | 124 |
c

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-lut16-p3-x24.c

// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
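  // Same evaluation, unrolled to 24 elements (six 4-lane groups) per main-loop iteration.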
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
const v128_t vzKLMN = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxKLMN, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vlog2e));
v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vlog2e));
v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vlog2e));
v128_t vnKLMN = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzKLMN, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
const v128_t vidx89AB = wasm_i32x4_shl(wasm_v128_and(vn89AB, vindex_mask), 2);
const v128_t ven89AB = wasm_i32x4_shl(vn89AB, 19);
const v128_t vidxCDEF = wasm_i32x4_shl(wasm_v128_and(vnCDEF, vindex_mask), 2);
const v128_t venCDEF = wasm_i32x4_shl(vnCDEF, 19);
const v128_t vidxGHIJ = wasm_i32x4_shl(wasm_v128_and(vnGHIJ, vindex_mask), 2);
const v128_t venGHIJ = wasm_i32x4_shl(vnGHIJ, 19);
const v128_t vidxKLMN = wasm_i32x4_shl(wasm_v128_and(vnKLMN, vindex_mask), 2);
const v128_t venKLMN = wasm_i32x4_shl(vnKLMN, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx8 = wasm_u32x4_extract_lane(vidx89AB, 0);
v128_t vl89AB = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx8));
const uint32_t vidxC = wasm_u32x4_extract_lane(vidxCDEF, 0);
v128_t vlCDEF = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxC));
const uint32_t vidxG = wasm_u32x4_extract_lane(vidxGHIJ, 0);
v128_t vlGHIJ = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxG));
const uint32_t vidxK = wasm_u32x4_extract_lane(vidxKLMN, 0);
v128_t vlKLMN = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxK));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx9 = wasm_u32x4_extract_lane(vidx89AB, 1);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx9), vl89AB, 1);
const uint32_t vidxD = wasm_u32x4_extract_lane(vidxCDEF, 1);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxD), vlCDEF, 1);
const uint32_t vidxH = wasm_u32x4_extract_lane(vidxGHIJ, 1);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxH), vlGHIJ, 1);
const uint32_t vidxL = wasm_u32x4_extract_lane(vidxKLMN, 1);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxL), vlKLMN, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidxA = wasm_u32x4_extract_lane(vidx89AB, 2);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxA), vl89AB, 2);
const uint32_t vidxE = wasm_u32x4_extract_lane(vidxCDEF, 2);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxE), vlCDEF, 2);
const uint32_t vidxI = wasm_u32x4_extract_lane(vidxGHIJ, 2);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxI), vlGHIJ, 2);
const uint32_t vidxM = wasm_u32x4_extract_lane(vidxKLMN, 2);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxM), vlKLMN, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
const uint32_t vidxB = wasm_u32x4_extract_lane(vidx89AB, 3);
vl89AB = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxB), vl89AB, 3);
const uint32_t vidxF = wasm_u32x4_extract_lane(vidxCDEF, 3);
vlCDEF = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxF), vlCDEF, 3);
const uint32_t vidxJ = wasm_u32x4_extract_lane(vidxGHIJ, 3);
vlGHIJ = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxJ), vlGHIJ, 3);
const uint32_t vidxN = wasm_u32x4_extract_lane(vidxKLMN, 3);
vlKLMN = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidxN), vlKLMN, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vs89AB = wasm_i32x4_add(vl89AB, ven89AB);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
v128_t vsCDEF = wasm_i32x4_add(vlCDEF, venCDEF);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vsGHIJ = wasm_i32x4_add(vlGHIJ, venGHIJ);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vsKLMN = wasm_i32x4_add(vlKLMN, venKLMN);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vminus_ln2_hi));
v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vminus_ln2_hi));
v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi));
v128_t vtKLMN = wasm_f32x4_add(vzKLMN, wasm_f32x4_mul(vnKLMN, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
v128_t vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt89AB));
v128_t vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtCDEF));
v128_t vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtGHIJ));
v128_t vpKLMN = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vtKLMN));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
vp89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vp89AB, vt89AB));
vpCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vpCDEF, vtCDEF));
vpGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vpGHIJ, vtGHIJ));
vpKLMN = wasm_f32x4_add(vtKLMN, wasm_f32x4_mul(vpKLMN, vtKLMN));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t ve89AB = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp89AB, vs89AB));
const v128_t veCDEF = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpCDEF, vsCDEF));
const v128_t veGHIJ = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpGHIJ, vsGHIJ));
const v128_t veKLMN = wasm_f32x4_mul(valpha, wasm_f32x4_add(vpKLMN, vsKLMN));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
const v128_t vyKLMN = wasm_v128_bitselect(veKLMN, vxKLMN, vsignmKLMN);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 17,447 | 53.867925 | 124 |
c

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-lut16-p3-x4.c

// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
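  // Narrowest vector variant: a single 4-lane group per main-loop iteration, followed only by the 1-3 element tail.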
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 5,642 | 40.492647 | 116 |
c

XNNPACK | XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-lut16-p3-x8.c

// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_lut16_p3_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);
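  // Same evaluation, unrolled to 8 elements (two 4-lane groups) per main-loop iteration.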
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vlog2e));
v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vlog2e));
const v128_t vidx0123 = wasm_i32x4_shl(wasm_v128_and(vn0123, vindex_mask), 2);
const v128_t ven0123 = wasm_i32x4_shl(vn0123, 19);
const v128_t vidx4567 = wasm_i32x4_shl(wasm_v128_and(vn4567, vindex_mask), 2);
const v128_t ven4567 = wasm_i32x4_shl(vn4567, 19);
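    // WAsm SIMD has no gather, so the table entries are fetched lane by lane below;
    // the <<2 above pre-scales each index into a byte offset from the table base.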
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx0123, 0);
v128_t vl0123 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx4 = wasm_u32x4_extract_lane(vidx4567, 0);
v128_t vl4567 = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx4));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx0123, 1);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl0123, 1);
const uint32_t vidx5 = wasm_u32x4_extract_lane(vidx4567, 1);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx5), vl4567, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx0123, 2);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl0123, 2);
const uint32_t vidx6 = wasm_u32x4_extract_lane(vidx4567, 2);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx6), vl4567, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx0123, 3);
vl0123 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl0123, 3);
const uint32_t vidx7 = wasm_u32x4_extract_lane(vidx4567, 3);
vl4567 = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx7), vl4567, 3);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
v128_t vs0123 = wasm_i32x4_add(vl0123, ven0123);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vs4567 = wasm_i32x4_add(vl4567, ven4567);
v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vminus_ln2_hi));
v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vminus_ln2_hi));
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt0123));
v128_t vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt4567));
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vp0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vp0123, vt0123));
vp4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vp4567, vt4567));
const v128_t ve0123 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp0123, vs0123));
const v128_t ve4567 = wasm_f32x4_mul(valpha, wasm_f32x4_add(vp4567, vs4567));
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vlog2e));
const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
const v128_t ven = wasm_i32x4_shl(vn, 19);
const uint32_t vidx0 = wasm_u32x4_extract_lane(vidx, 0);
v128_t vl = wasm_v128_load32_zero((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx0));
const uint32_t vidx1 = wasm_u32x4_extract_lane(vidx, 1);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx1), vl, 1);
const uint32_t vidx2 = wasm_u32x4_extract_lane(vidx, 2);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx2), vl, 2);
const uint32_t vidx3 = wasm_u32x4_extract_lane(vidx, 3);
vl = wasm_v128_load32_lane((const void*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx3), vl, 3);
v128_t vs = wasm_i32x4_add(vl, ven);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vminus_ln2_hi));
vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vminus_ln2_lo));
v128_t vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vc3, vt));
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(vt, wasm_f32x4_mul(vp, vt));
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
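    // Store the 1-3 leftover elements with progressively narrower lane stores.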
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,648 | 44.947619 | 124 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-p6-x12.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 12 * sizeof(float); batch -= 12 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
input += 12;
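    // z = x*prescale, clamped from below at sat_cutoff: beyond that point exp(z) is
    // negligible and the negative branch has already saturated to -alpha.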
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
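    // The magic-bias addition leaves round(z*log2e) (plus an exponent offset baked into
    // the bias) in the low bits of vn, so shifting left by 23 yields vs ~= 2**n.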
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
output += 12;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
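    // Degree-6 polynomial approximation of exp(t)-1 ~= t + c2*t^2 + ... + c6*t^6 on the
    // reduced interval, evaluated with Horner's scheme over the next few steps.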
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 8,119 | 41.072539 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-p6-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
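    // Cody-Waite reduction: t = z - n*ln(2), with ln(2) split into high and low parts so
    // the subtraction stays accurate in single precision.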
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 9,367 | 42.981221 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-p6-x20.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x20(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 20 * sizeof(float); batch -= 20 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
input += 20;
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
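    // With t scaled by s and s reduced by one, p*t + t below gives ~s*(exp(t)-1), and
    // adding (s-1) in the next step recovers exp(z)-1 before the multiply by alpha.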
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
output += 20;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 10,615 | 44.562232 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-p6-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x24(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 24 * sizeof(float); batch -= 24 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
v128_t vxGHIJ = wasm_v128_load(input + 16);
v128_t vxKLMN = wasm_v128_load(input + 20);
input += 24;
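    // This x86-tuned variant applies the saturation clamp with wasm_f32x4_pmax, which
    // lowers to a single SSE maxps instruction.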
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
const v128_t vz89AB = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx89AB, vprescale));
const v128_t vzCDEF = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxCDEF, vprescale));
const v128_t vzGHIJ = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxGHIJ, vprescale));
const v128_t vzKLMN = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vxKLMN, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vn89AB = wasm_f32x4_add(wasm_f32x4_mul(vz89AB, vlog2e), vmagic_bias);
v128_t vnCDEF = wasm_f32x4_add(wasm_f32x4_mul(vzCDEF, vlog2e), vmagic_bias);
v128_t vnGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vzGHIJ, vlog2e), vmagic_bias);
v128_t vnKLMN = wasm_f32x4_add(wasm_f32x4_mul(vzKLMN, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
v128_t vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_hi), vz89AB);
v128_t vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_hi), vzCDEF);
v128_t vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_hi), vzGHIJ);
v128_t vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_hi), vzKLMN);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
vt89AB = wasm_f32x4_add(wasm_f32x4_mul(vn89AB, vminus_ln2_lo), vt89AB);
vtCDEF = wasm_f32x4_add(wasm_f32x4_mul(vnCDEF, vminus_ln2_lo), vtCDEF);
vtGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vnGHIJ, vminus_ln2_lo), vtGHIJ);
vtKLMN = wasm_f32x4_add(wasm_f32x4_mul(vnKLMN, vminus_ln2_lo), vtKLMN);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
v128_t vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt89AB), vc5);
v128_t vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtCDEF), vc5);
v128_t vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtGHIJ), vc5);
v128_t vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vc6, vtKLMN), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc4);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc4);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc4);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc3);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc3);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc3);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vc2);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vc2);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vc2);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vp89AB = wasm_f32x4_mul(vp89AB, vt89AB);
vpCDEF = wasm_f32x4_mul(vpCDEF, vtCDEF);
vpGHIJ = wasm_f32x4_mul(vpGHIJ, vtGHIJ);
vpKLMN = wasm_f32x4_mul(vpKLMN, vtKLMN);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
vs89AB = wasm_f32x4_sub(vs89AB, vone);
vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
vsCDEF = wasm_f32x4_sub(vsCDEF, vone);
vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
vsGHIJ = wasm_f32x4_sub(vsGHIJ, vone);
vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);
vsKLMN = wasm_f32x4_sub(vsKLMN, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
vp89AB = wasm_f32x4_add(wasm_f32x4_mul(vp89AB, vt89AB), vt89AB);
vpCDEF = wasm_f32x4_add(wasm_f32x4_mul(vpCDEF, vtCDEF), vtCDEF);
vpGHIJ = wasm_f32x4_add(wasm_f32x4_mul(vpGHIJ, vtGHIJ), vtGHIJ);
vpKLMN = wasm_f32x4_add(wasm_f32x4_mul(vpKLMN, vtKLMN), vtKLMN);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t ve89AB = wasm_f32x4_mul(wasm_f32x4_add(vp89AB, vs89AB), valpha);
const v128_t veCDEF = wasm_f32x4_mul(wasm_f32x4_add(vpCDEF, vsCDEF), valpha);
const v128_t veGHIJ = wasm_f32x4_mul(wasm_f32x4_add(vpGHIJ, vsGHIJ), valpha);
const v128_t veKLMN = wasm_f32x4_mul(wasm_f32x4_add(vpKLMN, vsKLMN), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vsignm89AB = wasm_i32x4_shr(vx89AB, 31);
vx89AB = wasm_f32x4_mul(vx89AB, vbeta);
const v128_t vsignmCDEF = wasm_i32x4_shr(vxCDEF, 31);
vxCDEF = wasm_f32x4_mul(vxCDEF, vbeta);
const v128_t vsignmGHIJ = wasm_i32x4_shr(vxGHIJ, 31);
vxGHIJ = wasm_f32x4_mul(vxGHIJ, vbeta);
const v128_t vsignmKLMN = wasm_i32x4_shr(vxKLMN, 31);
vxKLMN = wasm_f32x4_mul(vxKLMN, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
const v128_t vy89AB = wasm_v128_bitselect(ve89AB, vx89AB, vsignm89AB);
const v128_t vyCDEF = wasm_v128_bitselect(veCDEF, vxCDEF, vsignmCDEF);
const v128_t vyGHIJ = wasm_v128_bitselect(veGHIJ, vxGHIJ, vsignmGHIJ);
const v128_t vyKLMN = wasm_v128_bitselect(veKLMN, vxKLMN, vsignmKLMN);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 11,863 | 45.893281 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-p6-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 4,263 | 37.071429 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-velu/gen/f32-velu-wasmsimd-x86-rr2-p6-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-velu/wasmsimd-rr2-p6.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vunary.h>
#include <xnnpack/common.h>
void xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.prescale);
const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.alpha);
const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.beta);
const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.sat_cutoff);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.magic_bias);
const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.log2e);
const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_hi);
const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.minus_ln2_lo);
const v128_t vc6 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c6);
const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c5);
const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c4);
const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c3);
const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.c2);
const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p6.one);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
const v128_t vz0123 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx0123, vprescale));
const v128_t vz4567 = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx4567, vprescale));
v128_t vn0123 = wasm_f32x4_add(wasm_f32x4_mul(vz0123, vlog2e), vmagic_bias);
v128_t vn4567 = wasm_f32x4_add(wasm_f32x4_mul(vz4567, vlog2e), vmagic_bias);
v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
v128_t vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_hi), vz0123);
v128_t vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_hi), vz4567);
vt0123 = wasm_f32x4_add(wasm_f32x4_mul(vn0123, vminus_ln2_lo), vt0123);
vt4567 = wasm_f32x4_add(wasm_f32x4_mul(vn4567, vminus_ln2_lo), vt4567);
v128_t vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt0123), vc5);
v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc4);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc3);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vc2);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
vp0123 = wasm_f32x4_mul(vp0123, vt0123);
vp4567 = wasm_f32x4_mul(vp4567, vt4567);
vt0123 = wasm_f32x4_mul(vt0123, vs0123);
vs0123 = wasm_f32x4_sub(vs0123, vone);
vt4567 = wasm_f32x4_mul(vt4567, vs4567);
vs4567 = wasm_f32x4_sub(vs4567, vone);
vp0123 = wasm_f32x4_add(wasm_f32x4_mul(vp0123, vt0123), vt0123);
vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
const v128_t ve0123 = wasm_f32x4_mul(wasm_f32x4_add(vp0123, vs0123), valpha);
const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);
const v128_t vsignm0123 = wasm_i32x4_shr(vx0123, 31);
vx0123 = wasm_f32x4_mul(vx0123, vbeta);
const v128_t vsignm4567 = wasm_i32x4_shr(vx4567, 31);
vx4567 = wasm_f32x4_mul(vx4567, vbeta);
const v128_t vy0123 = wasm_v128_bitselect(ve0123, vx0123, vsignm0123);
const v128_t vy4567 = wasm_v128_bitselect(ve4567, vx4567, vsignm4567);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
const v128_t vz = wasm_f32x4_pmax(vsat_cutoff, wasm_f32x4_mul(vx, vprescale));
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
wasm_v128_store(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
const v128_t vz = wasm_f32x4_pmax(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);
v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
v128_t vs = wasm_i32x4_shl(vn, 23);
vn = wasm_f32x4_sub(vn, vmagic_bias);
v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
vp = wasm_f32x4_mul(vp, vt);
vt = wasm_f32x4_mul(vt, vs);
vs = wasm_f32x4_sub(vs, vone);
vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);
const v128_t vsignm = wasm_i32x4_shr(vx, 31);
vx = wasm_f32x4_mul(vx, vbeta);
v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 6,868 | 38.705202 | 92 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__avx_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
const __m256 vhalf = _mm256_load_ps(params->avx.half);
const __m256 vone = _mm256_load_ps(params->avx.one);
const __m256 vzero = _mm256_setzero_ps();
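  // hardswish: y = x * clamp(x/6 + 1/2, 0, 1), computed as multiply, add, max, min,
  // and a final multiply by x.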
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
const __m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
__m256 vacc01234567 = _mm256_mul_ps(vx01234567, vsixth);
__m256 vacc89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vsixth);
vacc01234567 = _mm256_add_ps(vacc01234567, vhalf);
vacc89ABCDEF = _mm256_add_ps(vacc89ABCDEF, vhalf);
vacc01234567 = _mm256_max_ps(vacc01234567, vzero);
vacc89ABCDEF = _mm256_max_ps(vacc89ABCDEF, vzero);
vacc01234567 = _mm256_min_ps(vacc01234567, vone);
vacc89ABCDEF = _mm256_min_ps(vacc89ABCDEF, vone);
vacc01234567 = _mm256_mul_ps(vacc01234567, vx01234567);
vacc89ABCDEF = _mm256_mul_ps(vacc89ABCDEF, vx89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vacc = _mm256_mul_ps(vx, vsixth);
vacc = _mm256_add_ps(vacc, vhalf);
vacc = _mm256_max_ps(vacc, vzero);
vacc = _mm256_min_ps(vacc, vone);
vacc = _mm256_mul_ps(vacc, vx);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
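// The mask table holds seven all-ones 32-bit words followed by zeroes; offsetting it by
// -batch bytes yields a load mask whose leading batch/4 lanes are valid.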
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vacc = _mm256_mul_ps(vx, vsixth);
vacc = _mm256_add_ps(vacc, vhalf);
vacc = _mm256_max_ps(vacc, vzero);
vacc = _mm256_min_ps(vacc, vone);
vacc = _mm256_mul_ps(vacc, vx);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 3,082 | 30.783505 | 112 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-avx-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__avx_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
const __m256 vhalf = _mm256_load_ps(params->avx.half);
const __m256 vone = _mm256_load_ps(params->avx.one);
const __m256 vzero = _mm256_setzero_ps();
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vacc = _mm256_mul_ps(vx, vsixth);
vacc = _mm256_add_ps(vacc, vhalf);
vacc = _mm256_max_ps(vacc, vzero);
vacc = _mm256_min_ps(vacc, vone);
vacc = _mm256_mul_ps(vacc, vx);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vacc = _mm256_mul_ps(vx, vsixth);
vacc = _mm256_add_ps(vacc, vhalf);
vacc = _mm256_max_ps(vacc, vzero);
vacc = _mm256_min_ps(vacc, vone);
vacc = _mm256_mul_ps(vacc, vx);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,193 | 29.054795 | 112 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-avx512f-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__avx512f_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsixth = _mm512_set1_ps(params->avx512.sixth);
const __m512 vhalf = _mm512_set1_ps(params->avx512.half);
const __m512 vone = _mm512_set1_ps(params->avx512.one);
const __m512 vzero = _mm512_setzero_ps();
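// HardSwish: y = x * clamp(x/6 + 1/2, 0, 1); the affine step x/6 + 1/2 is a single fused multiply-add.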
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
__m512 vacc = _mm512_fmadd_ps(vx, vsixth, vhalf);
vacc = _mm512_max_ps(vacc, vzero);
vacc = _mm512_min_ps(vacc, vone);
vacc = _mm512_mul_ps(vacc, vx);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
__m512 vacc = _mm512_fmadd_ps(vx, vsixth, vhalf);
vacc = _mm512_max_ps(vacc, vzero);
vacc = _mm512_min_ps(vacc, vone);
vacc = _mm512_mul_ps(vacc, vx);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 1,900 | 30.683333 | 105 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-avx512f-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__avx512f_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vsixth = _mm512_set1_ps(params->avx512.sixth);
const __m512 vhalf = _mm512_set1_ps(params->avx512.half);
const __m512 vone = _mm512_set1_ps(params->avx512.one);
const __m512 vzero = _mm512_setzero_ps();
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
const __m512 vx0123456789ABCDEF = _mm512_loadu_ps(input);
const __m512 vxGHIJKLMNOPQRSTUV = _mm512_loadu_ps(input + 16);
input += 32;
__m512 vacc0123456789ABCDEF = _mm512_fmadd_ps(vx0123456789ABCDEF, vsixth, vhalf);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_fmadd_ps(vxGHIJKLMNOPQRSTUV, vsixth, vhalf);
vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEF, vzero);
vaccGHIJKLMNOPQRSTUV = _mm512_max_ps(vaccGHIJKLMNOPQRSTUV, vzero);
vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vone);
vaccGHIJKLMNOPQRSTUV = _mm512_min_ps(vaccGHIJKLMNOPQRSTUV, vone);
vacc0123456789ABCDEF = _mm512_mul_ps(vacc0123456789ABCDEF, vx0123456789ABCDEF);
vaccGHIJKLMNOPQRSTUV = _mm512_mul_ps(vaccGHIJKLMNOPQRSTUV, vxGHIJKLMNOPQRSTUV);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m512 vx = _mm512_loadu_ps(input);
input += 16;
__m512 vacc = _mm512_fmadd_ps(vx, vsixth, vhalf);
vacc = _mm512_max_ps(vacc, vzero);
vacc = _mm512_min_ps(vacc, vone);
vacc = _mm512_mul_ps(vacc, vx);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
const __m512 vx = _mm512_maskz_loadu_ps(vmask, input);
__m512 vacc = _mm512_fmadd_ps(vx, vsixth, vhalf);
vacc = _mm512_max_ps(vacc, vzero);
vacc = _mm512_min_ps(vacc, vone);
vacc = _mm512_mul_ps(vacc, vx);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 2,873 | 34.481481 | 105 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-fma3-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__fma3_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
const __m256 vhalf = _mm256_load_ps(params->avx.half);
const __m256 vone = _mm256_load_ps(params->avx.one);
const __m256 vzero = _mm256_setzero_ps();
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
const __m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
__m256 vacc01234567 = _mm256_fmadd_ps(vx01234567, vsixth, vhalf);
__m256 vacc89ABCDEF = _mm256_fmadd_ps(vx89ABCDEF, vsixth, vhalf);
vacc01234567 = _mm256_max_ps(vacc01234567, vzero);
vacc89ABCDEF = _mm256_max_ps(vacc89ABCDEF, vzero);
vacc01234567 = _mm256_min_ps(vacc01234567, vone);
vacc89ABCDEF = _mm256_min_ps(vacc89ABCDEF, vone);
vacc01234567 = _mm256_mul_ps(vacc01234567, vx01234567);
vacc89ABCDEF = _mm256_mul_ps(vacc89ABCDEF, vx89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vacc = _mm256_fmadd_ps(vx, vsixth, vhalf);
vacc = _mm256_max_ps(vacc, vzero);
vacc = _mm256_min_ps(vacc, vone);
vacc = _mm256_mul_ps(vacc, vx);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vacc = _mm256_fmadd_ps(vx, vsixth, vhalf);
vacc = _mm256_max_ps(vacc, vzero);
vacc = _mm256_min_ps(vacc, vone);
vacc = _mm256_mul_ps(vacc, vx);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,930 | 30.858696 | 112 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-fma3-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/avx.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__fma3_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
const __m256 vhalf = _mm256_load_ps(params->avx.half);
const __m256 vone = _mm256_load_ps(params->avx.one);
const __m256 vzero = _mm256_setzero_ps();
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vacc = _mm256_fmadd_ps(vx, vsixth, vhalf);
vacc = _mm256_max_ps(vacc, vzero);
vacc = _mm256_min_ps(vacc, vone);
vacc = _mm256_mul_ps(vacc, vx);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vacc = _mm256_fmadd_ps(vx, vsixth, vhalf);
vacc = _mm256_max_ps(vacc, vzero);
vacc = _mm256_min_ps(vacc, vone);
vacc = _mm256_mul_ps(vacc, vx);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,134 | 29.070423 | 112 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-neon-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__neon_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vsixth = vld1q_dup_f32(&params->scalar.sixth);
const float32x4_t vthree = vld1q_dup_f32(&params->scalar.three);
const int32x4_t vsix = vreinterpretq_s32_f32(vld1q_dup_f32(&params->scalar.six));
const int32x4_t vzero = vdupq_n_s32(0);
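// The clamp of (x + 3) to [0, 6] uses integer min/max on the IEEE-754 bit patterns: the signed
// max with 0 maps negative values to +0.0f, and non-negative floats order the same as their bits.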
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vx89AB = vld1q_f32(input); input += 4;
float32x4_t vxCDEF = vld1q_f32(input); input += 4;
float32x4_t vacc0123 = vaddq_f32(vx0123, vthree);
vx0123 = vmulq_f32(vx0123, vsixth);
float32x4_t vacc4567 = vaddq_f32(vx4567, vthree);
vx4567 = vmulq_f32(vx4567, vsixth);
float32x4_t vacc89AB = vaddq_f32(vx89AB, vthree);
vx89AB = vmulq_f32(vx89AB, vsixth);
float32x4_t vaccCDEF = vaddq_f32(vxCDEF, vthree);
vxCDEF = vmulq_f32(vxCDEF, vsixth);
vacc0123 = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc0123), vzero));
vacc4567 = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc4567), vzero));
vacc89AB = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc89AB), vzero));
vaccCDEF = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vaccCDEF), vzero));
vacc0123 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc0123), vsix));
vacc4567 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc4567), vsix));
vacc89AB = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc89AB), vsix));
vaccCDEF = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vaccCDEF), vsix));
vacc0123 = vmulq_f32(vacc0123, vx0123);
vacc4567 = vmulq_f32(vacc4567, vx4567);
vacc89AB = vmulq_f32(vacc89AB, vx89AB);
vaccCDEF = vmulq_f32(vaccCDEF, vxCDEF);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
vst1q_f32(output, vacc89AB); output += 4;
vst1q_f32(output, vaccCDEF); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vacc = vaddq_f32(vx, vthree);
vx = vmulq_f32(vx, vsixth);
vacc = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc), vzero));
vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
vacc = vmulq_f32(vacc, vx);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
float32x4_t vacc = vaddq_f32(vx, vthree);
vx = vmulq_f32(vx, vsixth);
vacc = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc), vzero));
vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
vacc = vmulq_f32(vacc, vx);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 3,728 | 37.84375 | 89 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-neon-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__neon_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vsixth = vld1q_dup_f32(&params->scalar.sixth);
const float32x4_t vthree = vld1q_dup_f32(&params->scalar.three);
const int32x4_t vsix = vreinterpretq_s32_f32(vld1q_dup_f32(&params->scalar.six));
const int32x4_t vzero = vdupq_n_s32(0);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vacc = vaddq_f32(vx, vthree);
vx = vmulq_f32(vx, vsixth);
vacc = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc), vzero));
vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
vacc = vmulq_f32(vacc, vx);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
float32x4_t vacc = vaddq_f32(vx, vthree);
vx = vmulq_f32(vx, vsixth);
vacc = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc), vzero));
vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
vacc = vmulq_f32(vacc, vx);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 1,985 | 31.557377 | 89 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-neon-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__neon_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float32x4_t vsixth = vld1q_dup_f32(&params->scalar.sixth);
const float32x4_t vthree = vld1q_dup_f32(&params->scalar.three);
const int32x4_t vsix = vreinterpretq_s32_f32(vld1q_dup_f32(&params->scalar.six));
const int32x4_t vzero = vdupq_n_s32(0);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vacc0123 = vaddq_f32(vx0123, vthree);
vx0123 = vmulq_f32(vx0123, vsixth);
float32x4_t vacc4567 = vaddq_f32(vx4567, vthree);
vx4567 = vmulq_f32(vx4567, vsixth);
vacc0123 = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc0123), vzero));
vacc4567 = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc4567), vzero));
vacc0123 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc0123), vsix));
vacc4567 = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc4567), vsix));
vacc0123 = vmulq_f32(vacc0123, vx0123);
vacc4567 = vmulq_f32(vacc4567, vx4567);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vacc = vaddq_f32(vx, vthree);
vx = vmulq_f32(vx, vsixth);
vacc = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc), vzero));
vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
vacc = vmulq_f32(vacc, vx);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
float32x4_t vx = vld1q_f32(input);
float32x4_t vacc = vaddq_f32(vx, vthree);
vx = vmulq_f32(vx, vsixth);
vacc = vreinterpretq_f32_s32(vmaxq_s32(vreinterpretq_s32_f32(vacc), vzero));
vacc = vreinterpretq_f32_s32(vminq_s32(vreinterpretq_s32_f32(vacc), vsix));
vacc = vmulq_f32(vacc, vx);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 2,893 | 34.292683 | 89 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-rvv-x1v.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__rvv_x1v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
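// Strip-mining loop: __riscv_vsetvl_e32m1() returns how many 32-bit elements are processed per
// iteration (capped by the remaining batch), so no scalar remainder path is needed.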
do {
const size_t n = __riscv_vsetvl_e32m1(batch);
vfloat32m1_t vx = __riscv_vle32_v_f32m1(input, n);
input += n;
vfloat32m1_t vacc = __riscv_vfadd_vf_f32m1(vx, vthree, n);
vx = __riscv_vfmul_vf_f32m1(vx, vsixth, n);
vacc = __riscv_vfmax_vf_f32m1(vacc, vzero, n);
vacc = __riscv_vfmin_vf_f32m1(vacc, vsix, n);
vacc = __riscv_vfmul_vv_f32m1(vacc, vx, n);
__riscv_vse32_v_f32m1(output, vacc, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 1,447 | 26.320755 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-rvv-x2v.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__rvv_x2v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m2(batch);
vfloat32m2_t vx = __riscv_vle32_v_f32m2(input, n);
input += n;
vfloat32m2_t vacc = __riscv_vfadd_vf_f32m2(vx, vthree, n);
vx = __riscv_vfmul_vf_f32m2(vx, vsixth, n);
vacc = __riscv_vfmax_vf_f32m2(vacc, vzero, n);
vacc = __riscv_vfmin_vf_f32m2(vacc, vsix, n);
vacc = __riscv_vfmul_vv_f32m2(vacc, vx, n);
__riscv_vse32_v_f32m2(output, vacc, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 1,447 | 26.320755 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-rvv-x4v.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__rvv_x4v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m4(batch);
vfloat32m4_t vx = __riscv_vle32_v_f32m4(input, n);
input += n;
vfloat32m4_t vacc = __riscv_vfadd_vf_f32m4(vx, vthree, n);
vx = __riscv_vfmul_vf_f32m4(vx, vsixth, n);
vacc = __riscv_vfmax_vf_f32m4(vacc, vzero, n);
vacc = __riscv_vfmin_vf_f32m4(vacc, vsix, n);
vacc = __riscv_vfmul_vv_f32m4(vacc, vx, n);
__riscv_vse32_v_f32m4(output, vacc, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 1,447 | 26.320755 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-rvv-x8v.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/rvv.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <riscv_vector.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__rvv_x8v(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
batch >>= XNN_LOG2_SIZEOF_FLOAT;
do {
const size_t n = __riscv_vsetvl_e32m8(batch);
vfloat32m8_t vx = __riscv_vle32_v_f32m8(input, n);
input += n;
vfloat32m8_t vacc = __riscv_vfadd_vf_f32m8(vx, vthree, n);
vx = __riscv_vfmul_vf_f32m8(vx, vsixth, n);
vacc = __riscv_vfmax_vf_f32m8(vacc, vzero, n);
vacc = __riscv_vfmin_vf_f32m8(vacc, vsix, n);
vacc = __riscv_vfmul_vv_f32m8(vacc, vx, n);
__riscv_vse32_v_f32m8(output, vacc, n);
output += n;
batch -= n;
} while (batch != 0);
}
| 1,447 | 26.320755 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__scalar_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
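// Per-element HardSwish: y = clamp(x + 3, 0, 6) * (x / 6).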
for (; batch >= sizeof(float); batch -= sizeof(float)) {
float vx = *input++;
float vacc = vx + vthree;
vx *= vsixth;
vacc = math_max_f32(vacc, vzero);
vacc = math_min_f32(vacc, vsix);
vacc *= vx;
*output++ = vacc;
}
}
| 1,134 | 24.222222 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__scalar_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
float vacc0 = vx0 + vthree;
vx0 *= vsixth;
float vacc1 = vx1 + vthree;
vx1 *= vsixth;
vacc0 = math_max_f32(vacc0, vzero);
vacc1 = math_max_f32(vacc1, vzero);
vacc0 = math_min_f32(vacc0, vsix);
vacc1 = math_min_f32(vacc1, vsix);
vacc0 *= vx0;
vacc1 *= vx1;
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
float vacc = vx + vthree;
vx *= vsixth;
vacc = math_max_f32(vacc, vzero);
vacc = math_min_f32(vacc, vsix);
vacc *= vx;
*output = vacc;
}
}
| 1,606 | 22.632353 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__scalar_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
float vacc0 = vx0 + vthree;
vx0 *= vsixth;
float vacc1 = vx1 + vthree;
vx1 *= vsixth;
float vacc2 = vx2 + vthree;
vx2 *= vsixth;
float vacc3 = vx3 + vthree;
vx3 *= vsixth;
vacc0 = math_max_f32(vacc0, vzero);
vacc1 = math_max_f32(vacc1, vzero);
vacc2 = math_max_f32(vacc2, vzero);
vacc3 = math_max_f32(vacc3, vzero);
vacc0 = math_min_f32(vacc0, vsix);
vacc1 = math_min_f32(vacc1, vsix);
vacc2 = math_min_f32(vacc2, vsix);
vacc3 = math_min_f32(vacc3, vsix);
vacc0 *= vx0;
vacc1 *= vx1;
vacc2 *= vx2;
vacc3 *= vx3;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
float vacc = vx + vthree;
vx *= vsixth;
vacc = math_max_f32(vacc, vzero);
vacc = math_min_f32(vacc, vsix);
vacc *= vx;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,083 | 23.517647 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-sse-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__sse_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsixth = _mm_load_ps(params->sse.sixth);
const __m128 vhalf = _mm_load_ps(params->sse.half);
const __m128 vone = _mm_load_ps(params->sse.one);
const __m128 vzero = _mm_setzero_ps();
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
input += 4;
__m128 vacc0123 = _mm_mul_ps(vx0123, vsixth);
vacc0123 = _mm_add_ps(vacc0123, vhalf);
vacc0123 = _mm_max_ps(vacc0123, vzero);
vacc0123 = _mm_min_ps(vacc0123, vone);
vacc0123 = _mm_mul_ps(vacc0123, vx0123);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
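// Remainder path: a full vector is loaded past the valid elements (the kernel is declared
// XNN_OOB_READS) and only the lanes belonging to the batch are stored.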
const __m128 vx0123 = _mm_loadu_ps(input);
__m128 vacc0123 = _mm_mul_ps(vx0123, vsixth);
vacc0123 = _mm_add_ps(vacc0123, vhalf);
vacc0123 = _mm_max_ps(vacc0123, vzero);
vacc0123 = _mm_min_ps(vacc0123, vone);
vacc0123 = _mm_mul_ps(vacc0123, vx0123);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc0123);
}
}
}
| 1,862 | 26 | 89 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-sse-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/sse.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__sse_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vsixth = _mm_load_ps(params->sse.sixth);
const __m128 vhalf = _mm_load_ps(params->sse.half);
const __m128 vone = _mm_load_ps(params->sse.one);
const __m128 vzero = _mm_setzero_ps();
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
__m128 vacc0123 = _mm_mul_ps(vx0123, vsixth);
__m128 vacc4567 = _mm_mul_ps(vx4567, vsixth);
vacc0123 = _mm_add_ps(vacc0123, vhalf);
vacc4567 = _mm_add_ps(vacc4567, vhalf);
vacc0123 = _mm_max_ps(vacc0123, vzero);
vacc4567 = _mm_max_ps(vacc4567, vzero);
vacc0123 = _mm_min_ps(vacc0123, vone);
vacc4567 = _mm_min_ps(vacc4567, vone);
vacc0123 = _mm_mul_ps(vacc0123, vx0123);
vacc4567 = _mm_mul_ps(vacc4567, vx4567);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
input += 4;
__m128 vacc0123 = _mm_mul_ps(vx0123, vsixth);
vacc0123 = _mm_add_ps(vacc0123, vhalf);
vacc0123 = _mm_max_ps(vacc0123, vzero);
vacc0123 = _mm_min_ps(vacc0123, vone);
vacc0123 = _mm_mul_ps(vacc0123, vx0123);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx0123 = _mm_loadu_ps(input);
__m128 vacc0123 = _mm_mul_ps(vx0123, vsixth);
vacc0123 = _mm_add_ps(vacc0123, vhalf);
vacc0123 = _mm_max_ps(vacc0123, vzero);
vacc0123 = _mm_min_ps(vacc0123, vone);
vacc0123 = _mm_mul_ps(vacc0123, vx0123);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc0123);
vacc0123 = _mm_movehl_ps(vacc0123, vacc0123);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc0123);
}
}
}
| 2,594 | 28.827586 | 89 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-wasm-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__wasm_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
for (; batch >= sizeof(float); batch -= sizeof(float)) {
float vx = *input++;
float vacc = vx + vthree;
vx *= vsixth;
vacc = __builtin_wasm_max_f32(vacc, vzero);
vacc = __builtin_wasm_min_f32(vacc, vsix);
vacc *= vx;
*output++ = vacc;
}
}
| 1,152 | 24.622222 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-wasm-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__wasm_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
input += 2;
float vacc0 = vx0 + vthree;
vx0 *= vsixth;
float vacc1 = vx1 + vthree;
vx1 *= vsixth;
vacc0 = __builtin_wasm_max_f32(vacc0, vzero);
vacc1 = __builtin_wasm_max_f32(vacc1, vzero);
vacc0 = __builtin_wasm_min_f32(vacc0, vsix);
vacc1 = __builtin_wasm_min_f32(vacc1, vsix);
vacc0 *= vx0;
vacc1 *= vx1;
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
float vx = *input;
float vacc = vx + vthree;
vx *= vsixth;
vacc = __builtin_wasm_max_f32(vacc, vzero);
vacc = __builtin_wasm_min_f32(vacc, vsix);
vacc *= vx;
*output = vacc;
}
}
| 1,664 | 23.485294 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-wasm-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__wasm_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vsixth = params->scalar.sixth;
const float vthree = params->scalar.three;
const float vsix = params->scalar.six;
const float vzero = 0.0f;
assert(vthree == 3.0f);
assert(vsix == 6.0f);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
float vx0 = input[0];
float vx1 = input[1];
float vx2 = input[2];
float vx3 = input[3];
input += 4;
float vacc0 = vx0 + vthree;
vx0 *= vsixth;
float vacc1 = vx1 + vthree;
vx1 *= vsixth;
float vacc2 = vx2 + vthree;
vx2 *= vsixth;
float vacc3 = vx3 + vthree;
vx3 *= vsixth;
vacc0 = __builtin_wasm_max_f32(vacc0, vzero);
vacc1 = __builtin_wasm_max_f32(vacc1, vzero);
vacc2 = __builtin_wasm_max_f32(vacc2, vzero);
vacc3 = __builtin_wasm_max_f32(vacc3, vzero);
vacc0 = __builtin_wasm_min_f32(vacc0, vsix);
vacc1 = __builtin_wasm_min_f32(vacc1, vsix);
vacc2 = __builtin_wasm_min_f32(vacc2, vsix);
vacc3 = __builtin_wasm_min_f32(vacc3, vsix);
vacc0 *= vx0;
vacc1 *= vx1;
vacc2 *= vx2;
vacc3 *= vx3;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
float vx = *input++;
float vacc = vx + vthree;
vx *= vsixth;
vacc = __builtin_wasm_max_f32(vacc, vzero);
vacc = __builtin_wasm_min_f32(vacc, vsix);
vacc *= vx;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,181 | 24.670588 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__wasmsimd_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsixth = wasm_v128_load64_splat(params->wasmsimd.sixth);
const v128_t vthree = wasm_v128_load64_splat(params->wasmsimd.three);
const v128_t vsix = wasm_v128_load64_splat(params->wasmsimd.six);
const v128_t vzero = wasm_i32x4_const_splat(0);
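// The clamp of (x + 3) to [0, 6] uses integer min/max on the float bit patterns; after the max
// with integer 0 every lane is non-negative, where integer and float ordering agree.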
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
v128_t vx89AB = wasm_v128_load(input + 8);
v128_t vxCDEF = wasm_v128_load(input + 12);
input += 16;
v128_t vacc0123 = wasm_f32x4_add(vx0123, vthree);
vx0123 = wasm_f32x4_mul(vx0123, vsixth);
v128_t vacc4567 = wasm_f32x4_add(vx4567, vthree);
vx4567 = wasm_f32x4_mul(vx4567, vsixth);
v128_t vacc89AB = wasm_f32x4_add(vx89AB, vthree);
vx89AB = wasm_f32x4_mul(vx89AB, vsixth);
v128_t vaccCDEF = wasm_f32x4_add(vxCDEF, vthree);
vxCDEF = wasm_f32x4_mul(vxCDEF, vsixth);
vacc0123 = wasm_i32x4_max(vacc0123, vzero);
vacc4567 = wasm_i32x4_max(vacc4567, vzero);
vacc89AB = wasm_i32x4_max(vacc89AB, vzero);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vzero);
vacc0123 = wasm_i32x4_min(vacc0123, vsix);
vacc4567 = wasm_i32x4_min(vacc4567, vsix);
vacc89AB = wasm_i32x4_min(vacc89AB, vsix);
vaccCDEF = wasm_i32x4_min(vaccCDEF, vsix);
vacc0123 = wasm_f32x4_mul(vacc0123, vx0123);
vacc4567 = wasm_f32x4_mul(vacc4567, vx4567);
vacc89AB = wasm_f32x4_mul(vacc89AB, vx89AB);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vxCDEF);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
wasm_v128_store(output + 8, vacc89AB);
wasm_v128_store(output + 12, vaccCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_f32x4_add(vx, vthree);
vx = wasm_f32x4_mul(vx, vsixth);
vacc = wasm_i32x4_max(vacc, vzero);
vacc = wasm_i32x4_min(vacc, vsix);
vacc = wasm_f32x4_mul(vacc, vx);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_f32x4_add(vx, vthree);
vx = wasm_f32x4_mul(vx, vsixth);
vacc = wasm_i32x4_max(vacc, vzero);
vacc = wasm_i32x4_min(vacc, vsix);
vacc = wasm_f32x4_mul(vacc, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 3,306 | 31.106796 | 89 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-wasmsimd-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__wasmsimd_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsixth = wasm_v128_load64_splat(params->wasmsimd.sixth);
const v128_t vthree = wasm_v128_load64_splat(params->wasmsimd.three);
const v128_t vsix = wasm_v128_load64_splat(params->wasmsimd.six);
const v128_t vzero = wasm_i32x4_const_splat(0);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_f32x4_add(vx, vthree);
vx = wasm_f32x4_mul(vx, vsixth);
vacc = wasm_i32x4_max(vacc, vzero);
vacc = wasm_i32x4_min(vacc, vsix);
vacc = wasm_f32x4_mul(vacc, vx);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_f32x4_add(vx, vthree);
vx = wasm_f32x4_mul(vx, vsixth);
vacc = wasm_i32x4_max(vacc, vzero);
vacc = wasm_i32x4_min(vacc, vsix);
vacc = wasm_f32x4_mul(vacc, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,866 | 27.287879 | 89 | c | XNNPACK | XNNPACK-master/src/f32-vhswish/gen/f32-vhswish-wasmsimd-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vhswish/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vhswish_ukernel__wasmsimd_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vsixth = wasm_v128_load64_splat(params->wasmsimd.sixth);
const v128_t vthree = wasm_v128_load64_splat(params->wasmsimd.three);
const v128_t vsix = wasm_v128_load64_splat(params->wasmsimd.six);
const v128_t vzero = wasm_i32x4_const_splat(0);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vacc0123 = wasm_f32x4_add(vx0123, vthree);
vx0123 = wasm_f32x4_mul(vx0123, vsixth);
v128_t vacc4567 = wasm_f32x4_add(vx4567, vthree);
vx4567 = wasm_f32x4_mul(vx4567, vsixth);
vacc0123 = wasm_i32x4_max(vacc0123, vzero);
vacc4567 = wasm_i32x4_max(vacc4567, vzero);
vacc0123 = wasm_i32x4_min(vacc0123, vsix);
vacc4567 = wasm_i32x4_min(vacc4567, vsix);
vacc0123 = wasm_f32x4_mul(vacc0123, vx0123);
vacc4567 = wasm_f32x4_mul(vacc4567, vx4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_f32x4_add(vx, vthree);
vx = wasm_f32x4_mul(vx, vsixth);
vacc = wasm_i32x4_max(vacc, vzero);
vacc = wasm_i32x4_min(vacc, vsix);
vacc = wasm_f32x4_mul(vacc, vx);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_f32x4_add(vx, vthree);
vx = wasm_f32x4_mul(vx, vsixth);
vacc = wasm_i32x4_max(vacc, vzero);
vacc = wasm_i32x4_min(vacc, vsix);
vacc = wasm_f32x4_mul(vacc, vx);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,633 | 28.595506 | 89 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-avx-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__avx_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vslope = _mm256_load_ps(params->avx.slope);
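// LeakyReLU: _mm256_blendv_ps selects by the sign bit of its mask operand (x itself), so
// negative lanes take x * slope and non-negative lanes pass x through unchanged.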
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
const __m256 vx89ABCDEF = _mm256_loadu_ps(input + 8);
input += 16;
__m256 vacc01234567 = _mm256_mul_ps(vx01234567, vslope);
__m256 vacc89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vslope);
vacc01234567 = _mm256_blendv_ps(vx01234567, vacc01234567, vx01234567);
vacc89ABCDEF = _mm256_blendv_ps(vx89ABCDEF, vacc89ABCDEF, vx89ABCDEF);
_mm256_storeu_ps(output, vacc01234567);
_mm256_storeu_ps(output + 8, vacc89ABCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx = _mm256_loadu_ps(input);
input += 8;
__m256 vacc = _mm256_mul_ps(vx, vslope);
vacc = _mm256_blendv_ps(vx, vacc, vx);
_mm256_storeu_ps(output, vacc);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vacc = _mm256_mul_ps(vx, vslope);
vacc = _mm256_blendv_ps(vx, vacc, vx);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 2,403 | 29.820513 | 112 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-avx-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/avx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__avx_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256 vslope = _mm256_load_ps(params->avx.slope);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m256 vx01234567 = _mm256_loadu_ps(input);
input += 8;
__m256 vacc01234567 = _mm256_mul_ps(vx01234567, vslope);
vacc01234567 = _mm256_blendv_ps(vx01234567, vacc01234567, vx01234567);
_mm256_storeu_ps(output, vacc01234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 7 * sizeof(float));
const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - batch));
const __m256 vx = _mm256_maskload_ps(input, vmask);
__m256 vacc = _mm256_mul_ps(vx, vslope);
vacc = _mm256_blendv_ps(vx, vacc, vx);
__m128 vacc_lo = _mm256_castps256_ps128(vacc);
if (batch & (4 * sizeof(float))) {
_mm_storeu_ps(output, vacc_lo);
vacc_lo = _mm256_extractf128_ps(vacc, 1);
output += 4;
}
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc_lo);
vacc_lo = _mm_movehl_ps(vacc_lo, vacc_lo);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc_lo);
}
}
}
| 1,882 | 27.530303 | 112 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-avx512f-x16.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__avx512f_x16(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vslope = _mm512_set1_ps(params->scalar.slope);
const __m512 vzero = _mm512_setzero_ps();
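// LeakyReLU: lanes that compare below zero are multiplied by the slope in place; the masked
// multiply leaves the remaining lanes untouched.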
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc0123456789ABCDEF = _mm512_loadu_ps(input);
input += 16;
const __mmask16 vsign0123456789ABCDEF = _mm512_cmp_ps_mask(vacc0123456789ABCDEF, vzero, _CMP_LT_OQ);
vacc0123456789ABCDEF = _mm512_mask_mul_ps(vacc0123456789ABCDEF, vsign0123456789ABCDEF, vacc0123456789ABCDEF, vslope);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input);
const __mmask16 vsign = _mm512_cmp_ps_mask(vacc, vzero, _CMP_LT_OQ);
vacc = _mm512_mask_mul_ps(vacc, vsign, vacc, vslope);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 1,830 | 31.122807 | 121 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-avx512f-x32.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/avx512f.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__avx512f_x32(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m512 vslope = _mm512_set1_ps(params->scalar.slope);
const __m512 vzero = _mm512_setzero_ps();
for (; batch >= 32 * sizeof(float); batch -= 32 * sizeof(float)) {
__m512 vacc0123456789ABCDEF = _mm512_loadu_ps(input);
__m512 vaccGHIJKLMNOPQRSTUV = _mm512_loadu_ps(input + 16);
input += 32;
const __mmask16 vsign0123456789ABCDEF = _mm512_cmp_ps_mask(vacc0123456789ABCDEF, vzero, _CMP_LT_OQ);
const __mmask16 vsignGHIJKLMNOPQRSTUV = _mm512_cmp_ps_mask(vaccGHIJKLMNOPQRSTUV, vzero, _CMP_LT_OQ);
vacc0123456789ABCDEF = _mm512_mask_mul_ps(vacc0123456789ABCDEF, vsign0123456789ABCDEF, vacc0123456789ABCDEF, vslope);
vaccGHIJKLMNOPQRSTUV = _mm512_mask_mul_ps(vaccGHIJKLMNOPQRSTUV, vsignGHIJKLMNOPQRSTUV, vaccGHIJKLMNOPQRSTUV, vslope);
_mm512_storeu_ps(output, vacc0123456789ABCDEF);
_mm512_storeu_ps(output + 16, vaccGHIJKLMNOPQRSTUV);
output += 32;
}
for (; batch >= 16 * sizeof(float); batch -= 16 * sizeof(float)) {
__m512 vacc = _mm512_loadu_ps(input);
input += 16;
const __mmask16 vsign = _mm512_cmp_ps_mask(vacc, vzero, _CMP_LT_OQ);
vacc = _mm512_mask_mul_ps(vacc, vsign, vacc, vslope);
_mm512_storeu_ps(output, vacc);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(float));
assert(batch <= 15 * sizeof(float));
// Prepare mask for valid 32-bit elements (depends on batch).
batch >>= XNN_LOG2_SIZEOF_FLOAT;
const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << batch) - UINT32_C(1)));
__m512 vacc = _mm512_maskz_loadu_ps(vmask, input);
const __mmask16 vsign = _mm512_cmp_ps_mask(vacc, vzero, _CMP_LT_OQ);
vacc = _mm512_mask_mul_ps(vacc, vsign, vacc, vslope);
_mm512_mask_storeu_ps(output, vmask, vacc);
}
}
| 2,494 | 35.15942 | 121 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-neon-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__neon_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vslope = vld1q_dup_f32(&params->scalar.slope);
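  // Negative inputs are detected by reinterpreting the floats as signed integers and
  // comparing against zero with vcltq_s32; vbslq_f32 then selects x*slope for negative
  // lanes and the original x for the rest.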
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
float32x4_t vacc0123 = vmulq_f32(vx0123, vslope);
const uint32x4_t vmask0123 = vcltq_s32(vreinterpretq_s32_f32(vx0123), vmovq_n_s32(0));
vacc0123 = vbslq_f32(vmask0123, vacc0123, vx0123);
vst1q_f32(output, vacc0123); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vacc = vmulq_f32(vx, vslope);
const uint32x4_t vmask = vcltq_s32(vreinterpretq_s32_f32(vx), vmovq_n_s32(0));
vacc = vbslq_f32(vmask, vacc, vx);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 1,638 | 27.754386 | 90 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-neon-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__neon_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
  const float32x4_t vslope = vld1q_dup_f32(&params->scalar.slope);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const float32x4_t vx0123 = vld1q_f32(input); input += 4;
const float32x4_t vx4567 = vld1q_f32(input); input += 4;
float32x4_t vacc0123 = vmulq_f32(vx0123, vslope);
const uint32x4_t vmask0123 = vcltq_s32(vreinterpretq_s32_f32(vx0123), vmovq_n_s32(0));
float32x4_t vacc4567 = vmulq_f32(vx4567, vslope);
const uint32x4_t vmask4567 = vcltq_s32(vreinterpretq_s32_f32(vx4567), vmovq_n_s32(0));
vacc0123 = vbslq_f32(vmask0123, vacc0123, vx0123);
vacc4567 = vbslq_f32(vmask4567, vacc4567, vx4567);
vst1q_f32(output, vacc0123); output += 4;
vst1q_f32(output, vacc4567); output += 4;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float32x4_t vx = vld1q_f32(input); input += 4;
float32x4_t vacc = vmulq_f32(vx, vslope);
const uint32x4_t vmask = vcltq_s32(vreinterpretq_s32_f32(vx), vmovq_n_s32(0));
vacc = vbslq_f32(vmask, vacc, vx);
vst1q_f32(output, vacc); output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const float32x4_t vx = vld1q_f32(input);
float32x4_t vacc = vmulq_f32(vx, vslope);
const uint32x4_t vmask = vcltq_s32(vreinterpretq_s32_f32(vx), vmovq_n_s32(0));
vacc = vbslq_f32(vmask, vacc, vx);
float32x2_t vacc_lo = vget_low_f32(vacc);
if (batch & (2 * sizeof(float))) {
vst1_f32(output, vacc_lo); output += 2;
vacc_lo = vget_high_f32(vacc);
}
if (batch & (1 * sizeof(float))) {
vst1_lane_f32(output, vacc_lo, 0);
}
}
}
| 2,283 | 32.101449 | 90 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__scalar_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vslope = params->scalar.slope;
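  // XNN_UNPREDICTABLE marks the sign test as data-dependent, steering the compiler
  // toward a branchless select (e.g. a conditional move) rather than a branch.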
do {
const float vx = *input++;
float vacc = vx * vslope;
vacc = XNN_UNPREDICTABLE(vx < 0.0f) ? vacc : vx;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
| 887 | 23 | 74 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__scalar_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vslope = params->scalar.slope;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
float vacc0 = vx0 * vslope;
float vacc1 = vx1 * vslope;
vacc0 = XNN_UNPREDICTABLE(vx0 < 0.0f) ? vacc0 : vx0;
vacc1 = XNN_UNPREDICTABLE(vx1 < 0.0f) ? vacc1 : vx1;
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
float vacc = vx * vslope;
vacc = XNN_UNPREDICTABLE(vx < 0.0f) ? vacc : vx;
*output = vacc;
}
}
| 1,255 | 23.627451 | 74 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__scalar_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vslope = params->scalar.slope;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
float vacc0 = vx0 * vslope;
float vacc1 = vx1 * vslope;
float vacc2 = vx2 * vslope;
float vacc3 = vx3 * vslope;
vacc0 = XNN_UNPREDICTABLE(vx0 < 0.0f) ? vacc0 : vx0;
vacc1 = XNN_UNPREDICTABLE(vx1 < 0.0f) ? vacc1 : vx1;
vacc2 = XNN_UNPREDICTABLE(vx2 < 0.0f) ? vacc2 : vx2;
vacc3 = XNN_UNPREDICTABLE(vx3 < 0.0f) ? vacc3 : vx3;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
float vacc = vx * vslope;
vacc = XNN_UNPREDICTABLE(vx < 0.0f) ? vacc : vx;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 1,620 | 25.145161 | 74 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-sse-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__sse_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vslope = _mm_load_ps(params->sse.slope);
const __m128 vzero = _mm_setzero_ps();
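  // Leaky ReLU is computed as max(x, 0) + slope * min(x, 0): the positive part passes
  // through unchanged and only the negative part is scaled.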
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
input += 4;
__m128 vacc0123 = _mm_max_ps(_mm_setzero_ps(), vx0123);
vx0123 = _mm_min_ps(vx0123, vzero);
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vx0123, vslope));
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
__m128 vacc = _mm_max_ps(_mm_setzero_ps(), vx);
vx = _mm_min_ps(vx, vzero);
vacc = _mm_add_ps(vacc, _mm_mul_ps(vx, vslope));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,564 | 25.083333 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-sse-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__sse_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vslope = _mm_load_ps(params->sse.slope);
const __m128 vzero = _mm_setzero_ps();
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
__m128 vx0123 = _mm_loadu_ps(input);
__m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
__m128 vacc0123 = _mm_max_ps(_mm_setzero_ps(), vx0123);
vx0123 = _mm_min_ps(vx0123, vzero);
__m128 vacc4567 = _mm_max_ps(_mm_setzero_ps(), vx4567);
vx4567 = _mm_min_ps(vx4567, vzero);
vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(vx0123, vslope));
vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(vx4567, vslope));
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
__m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vacc = _mm_max_ps(_mm_setzero_ps(), vx);
vx = _mm_min_ps(vx, vzero);
vacc = _mm_add_ps(vacc, _mm_mul_ps(vx, vslope));
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
__m128 vx = _mm_loadu_ps(input);
__m128 vacc = _mm_max_ps(_mm_setzero_ps(), vx);
vx = _mm_min_ps(vx, vzero);
vacc = _mm_add_ps(vacc, _mm_mul_ps(vx, vslope));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 2,128 | 27.013158 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-sse2-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__sse2_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vslope = _mm_load_ps(params->sse.slope);
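  // SSE2 has no variable blend, so a per-lane sign mask is built with an integer
  // compare against the float bit pattern, and the result is assembled with and/andnot/or.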
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
input += 4;
__m128 vacc0123 = _mm_mul_ps(vx0123, vslope);
const __m128 vmask0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
vacc0123 = _mm_or_ps(_mm_and_ps(vacc0123, vmask0123), _mm_andnot_ps(vmask0123, vx0123));
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vacc = _mm_mul_ps(vx, vslope);
const __m128 vmask = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vacc = _mm_or_ps(_mm_and_ps(vacc, vmask), _mm_andnot_ps(vmask, vx));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,706 | 27.932203 | 110 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-sse2-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__sse2_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vslope = _mm_load_ps(params->sse.slope);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
__m128 vacc0123 = _mm_mul_ps(vx0123, vslope);
const __m128 vmask0123 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx0123)));
__m128 vacc4567 = _mm_mul_ps(vx4567, vslope);
const __m128 vmask4567 = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx4567)));
vacc0123 = _mm_or_ps(_mm_and_ps(vacc0123, vmask0123), _mm_andnot_ps(vmask0123, vx0123));
vacc4567 = _mm_or_ps(_mm_and_ps(vacc4567, vmask4567), _mm_andnot_ps(vmask4567, vx4567));
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vacc = _mm_mul_ps(vx, vslope);
const __m128 vmask = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vacc = _mm_or_ps(_mm_and_ps(vacc, vmask), _mm_andnot_ps(vmask, vx));
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vacc = _mm_mul_ps(vx, vslope);
const __m128 vmask = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
vacc = _mm_or_ps(_mm_and_ps(vacc, vmask), _mm_andnot_ps(vmask, vx));
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 2,452 | 31.706667 | 110 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-sse41-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__sse41_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vslope = _mm_load_ps(params->sse.slope);
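  // _mm_blendv_ps selects by the sign bit of its third operand, so x itself acts as the
  // mask: negative lanes take x*slope, non-negative lanes keep x.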
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
input += 4;
__m128 vacc0123 = _mm_mul_ps(vx0123, vslope);
vacc0123 = _mm_blendv_ps(vx0123, vacc0123, vx0123);
_mm_storeu_ps(output, vacc0123);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vacc = _mm_mul_ps(vx, vslope);
vacc = _mm_blendv_ps(vx, vacc, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,423 | 23.982456 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-sse41-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__sse41_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128 vslope = _mm_load_ps(params->sse.slope);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const __m128 vx0123 = _mm_loadu_ps(input);
const __m128 vx4567 = _mm_loadu_ps(input + 4);
input += 8;
__m128 vacc0123 = _mm_mul_ps(vx0123, vslope);
__m128 vacc4567 = _mm_mul_ps(vx4567, vslope);
vacc0123 = _mm_blendv_ps(vx0123, vacc0123, vx0123);
vacc4567 = _mm_blendv_ps(vx4567, vacc4567, vx4567);
_mm_storeu_ps(output, vacc0123);
_mm_storeu_ps(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const __m128 vx = _mm_loadu_ps(input);
input += 4;
__m128 vacc = _mm_mul_ps(vx, vslope);
vacc = _mm_blendv_ps(vx, vacc, vx);
_mm_storeu_ps(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const __m128 vx = _mm_loadu_ps(input);
__m128 vacc = _mm_mul_ps(vx, vslope);
vacc = _mm_blendv_ps(vx, vacc, vx);
if (batch & (2 * sizeof(float))) {
_mm_storel_pi((__m64*) output, vacc);
vacc = _mm_movehl_ps(vacc, vacc);
output += 2;
}
if (batch & (1 * sizeof(float))) {
_mm_store_ss(output, vacc);
}
}
}
| 1,885 | 25.56338 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasm-x1.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasm_x1(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vslope = params->scalar.slope;
const float vzero = 0.0f;
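  // The input is split into min(x, 0), which is scaled by the slope, and max(x, 0);
  // their sum is the leaky ReLU result.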
do {
const float vx = *input++;
const float vnegx = __builtin_wasm_min_f32(vx, vzero);
float vacc = vnegx * vslope;
const float vposx = __builtin_wasm_max_f32(vx, vzero);
vacc += vposx;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
| 998 | 23.975 | 74 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasm-x2.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasm_x2(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vslope = params->scalar.slope;
const float vzero = 0.0f;
for (; batch >= 2 * sizeof(float); batch -= 2 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
input += 2;
const float vnegx0 = __builtin_wasm_min_f32(vx0, vzero);
const float vnegx1 = __builtin_wasm_min_f32(vx1, vzero);
float vacc0 = vnegx0 * vslope;
const float vposx0 = __builtin_wasm_max_f32(vx0, vzero);
float vacc1 = vnegx1 * vslope;
const float vposx1 = __builtin_wasm_max_f32(vx1, vzero);
vacc0 += vposx0;
vacc1 += vposx1;
output[0] = vacc0;
output[1] = vacc1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const float vx = *input;
const float vnegx = __builtin_wasm_min_f32(vx, vzero);
float vacc = vnegx * vslope;
const float vposx = __builtin_wasm_max_f32(vx, vzero);
vacc += vposx;
*output = vacc;
}
}
| 1,545 | 25.20339 | 74 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasm-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasm_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const float vslope = params->scalar.slope;
const float vzero = 0.0f;
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const float vx0 = input[0];
const float vx1 = input[1];
const float vx2 = input[2];
const float vx3 = input[3];
input += 4;
const float vnegx0 = __builtin_wasm_min_f32(vx0, vzero);
const float vnegx1 = __builtin_wasm_min_f32(vx1, vzero);
const float vnegx2 = __builtin_wasm_min_f32(vx2, vzero);
const float vnegx3 = __builtin_wasm_min_f32(vx3, vzero);
float vacc0 = vnegx0 * vslope;
const float vposx0 = __builtin_wasm_max_f32(vx0, vzero);
float vacc1 = vnegx1 * vslope;
const float vposx1 = __builtin_wasm_max_f32(vx1, vzero);
float vacc2 = vnegx2 * vslope;
const float vposx2 = __builtin_wasm_max_f32(vx2, vzero);
float vacc3 = vnegx3 * vslope;
const float vposx3 = __builtin_wasm_max_f32(vx3, vzero);
vacc0 += vposx0;
vacc1 += vposx1;
vacc2 += vposx2;
vacc3 += vposx3;
output[0] = vacc0;
output[1] = vacc1;
output[2] = vacc2;
output[3] = vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const float vx = *input++;
const float vnegx = __builtin_wasm_min_f32(vx, vzero);
float vacc = vnegx * vslope;
const float vposx = __builtin_wasm_max_f32(vx, vzero);
vacc += vposx;
*output++ = vacc;
batch -= sizeof(float);
} while (batch != 0);
}
}
| 2,092 | 27.283784 | 74 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasmrelaxedsimd-iminmax-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasmrelaxedsimd_iminmax_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vslope = wasm_v128_load64_splat(params->wasmsimd.slope);
const v128_t vzero = wasm_i32x4_const_splat(0);
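  // Integer max/min against zero select each lane by its sign bit: vacc keeps the
  // non-negative lanes and vx keeps the negative ones (the others become zero). The
  // relaxed fused multiply-add then folds slope * negative part into the positive part.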
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_i32x4_max(vx, vzero);
vx = wasm_i32x4_min(vx, vzero);
vacc = __builtin_wasm_relaxed_madd_f32x4(vx, vslope, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_i32x4_max(vx, vzero);
vx = wasm_i32x4_min(vx, vzero);
vacc = __builtin_wasm_relaxed_madd_f32x4(vx, vslope, vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,634 | 28.196429 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasmrelaxedsimd-iminmax-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasmrelaxedsimd_iminmax_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vslope = wasm_v128_load64_splat(params->wasmsimd.slope);
const v128_t vzero = wasm_i32x4_const_splat(0);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vacc0123 = wasm_i32x4_max(vx0123, vzero);
vx0123 = wasm_i32x4_min(vx0123, vzero);
v128_t vacc4567 = wasm_i32x4_max(vx4567, vzero);
vx4567 = wasm_i32x4_min(vx4567, vzero);
vacc0123 = __builtin_wasm_relaxed_madd_f32x4(vx0123, vslope, vacc0123);
vacc4567 = __builtin_wasm_relaxed_madd_f32x4(vx4567, vslope, vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_i32x4_max(vx, vzero);
vx = wasm_i32x4_min(vx, vzero);
vacc = __builtin_wasm_relaxed_madd_f32x4(vx, vslope, vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_i32x4_max(vx, vzero);
vx = wasm_i32x4_min(vx, vzero);
vacc = __builtin_wasm_relaxed_madd_f32x4(vx, vslope, vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,259 | 29.958904 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasmrelaxedsimd-laneselect-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasmrelaxedsimd_laneselect_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vslope = wasm_v128_load64_splat(params->wasmsimd.slope);
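  // wasm_i32x4_shr(vx, 31) smears the sign bit across each lane, producing an all-ones
  // mask for negative inputs; the relaxed laneselect then picks x*slope for those lanes
  // and x for the rest.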
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_f32x4_mul(vx, vslope);
const v128_t vmask = wasm_i32x4_shr(vx, 31);
vacc = __builtin_wasm_relaxed_laneselect_i32x4(vacc, vx, vmask);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_f32x4_mul(vx, vslope);
const v128_t vmask = wasm_i32x4_shr(vx, 31);
vacc = __builtin_wasm_relaxed_laneselect_i32x4(vacc, vx, vmask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,640 | 28.836364 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasmrelaxedsimd-laneselect-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasmrelaxedsimd_laneselect_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vslope = wasm_v128_load64_splat(params->wasmsimd.slope);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vacc0123 = wasm_f32x4_mul(vx0123, vslope);
const v128_t vmask0123 = wasm_i32x4_shr(vx0123, 31);
v128_t vacc4567 = wasm_f32x4_mul(vx4567, vslope);
const v128_t vmask4567 = wasm_i32x4_shr(vx4567, 31);
vacc0123 = __builtin_wasm_relaxed_laneselect_i32x4(vacc0123, vx0123, vmask0123);
vacc4567 = __builtin_wasm_relaxed_laneselect_i32x4(vacc4567, vx4567, vmask4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_f32x4_mul(vx, vslope);
const v128_t vmask = wasm_i32x4_shr(vx, 31);
vacc = __builtin_wasm_relaxed_laneselect_i32x4(vacc, vx, vmask);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_f32x4_mul(vx, vslope);
const v128_t vmask = wasm_i32x4_shr(vx, 31);
vacc = __builtin_wasm_relaxed_laneselect_i32x4(vacc, vx, vmask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,323 | 31.277778 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasmsimd-iminmax-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasmsimd_iminmax_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vslope = wasm_v128_load64_splat(params->wasmsimd.slope);
const v128_t vzero = wasm_i32x4_const_splat(0);
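  // Same sign-bit split as the relaxed-SIMD iminmax variant above, but with a separate
  // multiply and add since baseline WASM SIMD has no fused multiply-add.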
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_i32x4_max(vx, vzero);
vx = wasm_i32x4_min(vx, vzero);
vacc = wasm_f32x4_add(wasm_f32x4_mul(vx, vslope), vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_i32x4_max(vx, vzero);
vx = wasm_i32x4_min(vx, vzero);
vacc = wasm_f32x4_add(wasm_f32x4_mul(vx, vslope), vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,621 | 27.964286 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasmsimd-iminmax-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasmsimd-iminmax.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasmsimd_iminmax_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vslope = wasm_v128_load64_splat(params->wasmsimd.slope);
const v128_t vzero = wasm_i32x4_const_splat(0);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
v128_t vx0123 = wasm_v128_load(input);
v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vacc0123 = wasm_i32x4_max(vx0123, vzero);
vx0123 = wasm_i32x4_min(vx0123, vzero);
v128_t vacc4567 = wasm_i32x4_max(vx4567, vzero);
vx4567 = wasm_i32x4_min(vx4567, vzero);
vacc0123 = wasm_f32x4_add(wasm_f32x4_mul(vx0123, vslope), vacc0123);
vacc4567 = wasm_f32x4_add(wasm_f32x4_mul(vx4567, vslope), vacc4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_i32x4_max(vx, vzero);
vx = wasm_i32x4_min(vx, vzero);
vacc = wasm_f32x4_add(wasm_f32x4_mul(vx, vslope), vacc);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_i32x4_max(vx, vzero);
vx = wasm_i32x4_min(vx, vzero);
vacc = wasm_f32x4_add(wasm_f32x4_mul(vx, vslope), vacc);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,240 | 29.69863 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasmsimd-laneselect-x4.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasmsimd_laneselect_x4(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vslope = wasm_v128_load64_splat(params->wasmsimd.slope);
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_f32x4_mul(vx, vslope);
const v128_t vmask = wasm_i32x4_shr(vx, 31);
vacc = wasm_v128_bitselect(vacc, vx, vmask);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_f32x4_mul(vx, vslope);
const v128_t vmask = wasm_i32x4_shr(vx, 31);
vacc = wasm_v128_bitselect(vacc, vx, vmask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 1,593 | 27.981818 | 88 | c | XNNPACK | XNNPACK-master/src/f32-vlrelu/gen/f32-vlrelu-wasmsimd-laneselect-x8.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vlrelu/wasmsimd-laneselect.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vunary.h>
void xnn_f32_vlrelu_ukernel__wasmsimd_laneselect_x8(
size_t batch,
const float* input,
float* output,
const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(float) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vslope = wasm_v128_load64_splat(params->wasmsimd.slope);
for (; batch >= 8 * sizeof(float); batch -= 8 * sizeof(float)) {
const v128_t vx0123 = wasm_v128_load(input);
const v128_t vx4567 = wasm_v128_load(input + 4);
input += 8;
v128_t vacc0123 = wasm_f32x4_mul(vx0123, vslope);
const v128_t vmask0123 = wasm_i32x4_shr(vx0123, 31);
v128_t vacc4567 = wasm_f32x4_mul(vx4567, vslope);
const v128_t vmask4567 = wasm_i32x4_shr(vx4567, 31);
vacc0123 = wasm_v128_bitselect(vacc0123, vx0123, vmask0123);
vacc4567 = wasm_v128_bitselect(vacc4567, vx4567, vmask4567);
wasm_v128_store(output, vacc0123);
wasm_v128_store(output + 4, vacc4567);
output += 8;
}
for (; batch >= 4 * sizeof(float); batch -= 4 * sizeof(float)) {
const v128_t vx = wasm_v128_load(input);
input += 4;
v128_t vacc = wasm_f32x4_mul(vx, vslope);
const v128_t vmask = wasm_i32x4_shr(vx, 31);
vacc = wasm_v128_bitselect(vacc, vx, vmask);
wasm_v128_store(output, vacc);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const v128_t vx = wasm_v128_load(input);
v128_t vacc = wasm_f32x4_mul(vx, vslope);
const v128_t vmask = wasm_i32x4_shr(vx, 31);
vacc = wasm_v128_bitselect(vacc, vx, vmask);
if (batch & (2 * sizeof(float))) {
wasm_v128_store64_lane(output, vacc, 0);
vacc = wasm_v64x2_shuffle(vacc, vacc, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(float))) {
wasm_v128_store32_lane(output, vacc, 0);
}
}
}
| 2,236 | 30.069444 | 88 |
c
|
XNNPACK
|
XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c1-minmax-scalar-2x.c
|
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
void xnn_f32_vmulcaddc_minmax_ukernel_c1__scalar_2x(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
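  // Two rows are processed per pass; when only one row remains, the second set of
  // pointers aliases the first, so its results are harmlessly overwritten. Weights are
  // packed per channel as [scale, bias].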
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
do {
const float vscale = w[0];
float vacc0 = *i0++;
float vacc1 = *i1++;
const float vbias = w[1];
vacc0 = vacc0 * vscale + vbias;
vacc1 = vacc1 * vscale + vbias;
vacc0 = math_max_f32(vacc0, vmin);
vacc1 = math_max_f32(vacc1, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
*o0++ = vacc0;
*o1++ = vacc1;
w += 2;
c -= sizeof(float);
} while (c != 0);
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 2,041 | 25.179487 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c1-minmax-wasm-2x.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
void xnn_f32_vmulcaddc_minmax_ukernel_c1__wasm_2x(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
do {
const float vscale = w[0];
float vacc0 = *i0++;
float vacc1 = *i1++;
const float vbias = w[1];
vacc0 = vacc0 * vscale + vbias;
vacc1 = vacc1 * vscale + vbias;
vacc0 = __builtin_wasm_max_f32(vacc0, vmin);
vacc1 = __builtin_wasm_max_f32(vacc1, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
*o0++ = vacc0;
*o1++ = vacc1;
w += 2;
c -= sizeof(float);
} while (c != 0);
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 2,079 | 25.666667 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c2-minmax-scalar-2x.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
void xnn_f32_vmulcaddc_minmax_ukernel_c2__scalar_2x(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 2 * sizeof(float); c -= 2 * sizeof(float)) {
const float vscale0 = w[0];
const float vscale1 = w[1];
float vacc0x0 = i0[0];
float vacc0x1 = i0[1];
i0 += 2;
float vacc1x0 = i1[0];
float vacc1x1 = i1[1];
i1 += 2;
const float vbias0 = w[2];
const float vbias1 = w[3];
vacc0x0 = vacc0x0 * vscale0 + vbias0;
vacc0x1 = vacc0x1 * vscale1 + vbias1;
vacc1x0 = vacc1x0 * vscale0 + vbias0;
vacc1x1 = vacc1x1 * vscale1 + vbias1;
vacc0x0 = math_max_f32(vacc0x0, vmin);
vacc0x1 = math_max_f32(vacc0x1, vmin);
vacc1x0 = math_max_f32(vacc1x0, vmin);
vacc1x1 = math_max_f32(vacc1x1, vmin);
vacc0x0 = math_min_f32(vacc0x0, vmax);
vacc0x1 = math_min_f32(vacc0x1, vmax);
vacc1x0 = math_min_f32(vacc1x0, vmax);
vacc1x1 = math_min_f32(vacc1x1, vmax);
o0[0] = vacc0x0;
o0[1] = vacc0x1;
o0 += 2;
o1[0] = vacc1x0;
o1[1] = vacc1x1;
o1 += 2;
w += 4;
}
if XNN_UNLIKELY(c != 0) {
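      // Weights are packed per channel pair as [scale0, scale1, bias0, bias1]; after
      // *w++ consumes the last scale, its bias sits at w[1], past the unused second
      // scale slot.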
do {
const float vscale = *w++;
float vacc0 = *i0++;
float vacc1 = *i1++;
const float vbias = w[1];
vacc0 = vacc0 * vscale + vbias;
vacc1 = vacc1 * vscale + vbias;
vacc0 = math_max_f32(vacc0, vmin);
vacc1 = math_max_f32(vacc1, vmin);
vacc0 = math_min_f32(vacc0, vmax);
vacc1 = math_min_f32(vacc1, vmax);
*o0++ = vacc0;
*o1++ = vacc1;
c -= sizeof(float);
} while (c != 0);
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,119 | 25.666667 | 75 | c | XNNPACK | XNNPACK-master/src/f32-vmulcaddc/gen/f32-vmulcaddc-c2-minmax-wasm-2x.c |
// Auto-generated file. Do not edit!
// Template: src/f32-vmulcaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
void xnn_f32_vmulcaddc_minmax_ukernel_c2__wasm_2x(
size_t rows,
size_t channels,
const float* restrict input,
size_t input_stride,
const float* restrict weights,
float* restrict output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(channels != 0);
assert(channels % sizeof(float) == 0);
const size_t input_increment = input_stride * 2 - channels;
const size_t output_increment = output_stride * 2 - channels;
const float* i0 = input;
float* o0 = output;
const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
float* o1 = (float*) ((uintptr_t) o0 + output_stride);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
do {
if XNN_UNPREDICTABLE(rows < 2) {
i1 = i0;
o1 = o0;
}
const float* w = weights;
size_t c = channels;
for (; c >= 2 * sizeof(float); c -= 2 * sizeof(float)) {
const float vscale0 = w[0];
const float vscale1 = w[1];
float vacc0x0 = i0[0];
float vacc0x1 = i0[1];
i0 += 2;
float vacc1x0 = i1[0];
float vacc1x1 = i1[1];
i1 += 2;
const float vbias0 = w[2];
const float vbias1 = w[3];
vacc0x0 = vacc0x0 * vscale0 + vbias0;
vacc0x1 = vacc0x1 * vscale1 + vbias1;
vacc1x0 = vacc1x0 * vscale0 + vbias0;
vacc1x1 = vacc1x1 * vscale1 + vbias1;
vacc0x0 = __builtin_wasm_max_f32(vacc0x0, vmin);
vacc0x1 = __builtin_wasm_max_f32(vacc0x1, vmin);
vacc1x0 = __builtin_wasm_max_f32(vacc1x0, vmin);
vacc1x1 = __builtin_wasm_max_f32(vacc1x1, vmin);
vacc0x0 = __builtin_wasm_min_f32(vacc0x0, vmax);
vacc0x1 = __builtin_wasm_min_f32(vacc0x1, vmax);
vacc1x0 = __builtin_wasm_min_f32(vacc1x0, vmax);
vacc1x1 = __builtin_wasm_min_f32(vacc1x1, vmax);
o0[0] = vacc0x0;
o0[1] = vacc0x1;
o0 += 2;
o1[0] = vacc1x0;
o1[1] = vacc1x1;
o1 += 2;
w += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const float vscale = *w++;
float vacc0 = *i0++;
float vacc1 = *i1++;
const float vbias = w[1];
vacc0 = vacc0 * vscale + vbias;
vacc1 = vacc1 * vscale + vbias;
vacc0 = __builtin_wasm_max_f32(vacc0, vmin);
vacc1 = __builtin_wasm_max_f32(vacc1, vmin);
vacc0 = __builtin_wasm_min_f32(vacc0, vmax);
vacc1 = __builtin_wasm_min_f32(vacc1, vmax);
*o0++ = vacc0;
*o1++ = vacc1;
c -= sizeof(float);
} while (c != 0);
}
i0 = (const float*) ((uintptr_t) i0 + input_increment);
o0 = (float*) ((uintptr_t) o0 + output_increment);
i1 = (const float*) ((uintptr_t) i1 + input_increment);
o1 = (float*) ((uintptr_t) o1 + output_increment);
rows = doz(rows, 2);
} while (rows != 0);
}
| 3,237 | 26.675214 | 75 | c |